Dataset columns:
  code        string  (length 5 – 1M)
  repo_name   string  (length 5 – 109)
  path        string  (length 6 – 208)
  language    string  (1 distinct value)
  license     string  (15 distinct values)
  size        int64   (5 – 1M)
/*
 *
 *
 * Copyright (C) 2009-2014 Typesafe Inc. <http://www.typesafe.com>
 *
 */
package play.api.libs.ws.ssl.debug

import java.lang.reflect.Field
import java.security.PrivilegedExceptionAction

import play.api.libs.ws.ssl.MonkeyPatcher

/**
 * A privileged action that will find relevant classes containing static final fields of type T and replace
 * them with the object referenced by {{newDebug}}, and switch out the "args" field value with the value defined
 * in {{newOptions}}. This is the only way to change JSSE debugging after the class loads.
 */
abstract class FixLoggingAction extends PrivilegedExceptionAction[Unit] with MonkeyPatcher with ClassFinder {

  def newOptions: String

  def isValidField(field: Field, definedType: Class[_]): Boolean = {
    import java.lang.reflect.Modifier._
    val modifiers: Int = field.getModifiers
    field.getType == definedType && isStatic(modifiers)
  }
}
jyotikamboj/container
pf-framework/src/play-ws/src/main/scala/play/api/libs/ws/ssl/debug/FixLoggingAction.scala
Scala
mit
914
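The scaladoc in FixLoggingAction above describes the approach: reflectively locate static fields of a given type and swap their values. Below is a minimal, self-contained sketch of the field-matching part, mirroring the isValidField check; StaticFieldFinder and its method are illustrative names, not part of the Play sources.

import java.lang.reflect.{Field, Modifier}

object StaticFieldFinder {
  // Returns the declared fields of `owner` whose type is `fieldType` and which
  // are static, mirroring the isValidField check in FixLoggingAction above.
  def staticFieldsOfType(owner: Class[_], fieldType: Class[_]): Seq[Field] =
    owner.getDeclaredFields.toSeq.filter { f =>
      f.getType == fieldType && Modifier.isStatic(f.getModifiers)
    }
}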
/*
 * Copyright 2016 The BigDL Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intel.analytics.bigdl.nn.ops

import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.T
import org.scalatest.{FlatSpec, Matchers}

class PadSpec extends FlatSpec with Matchers {
  "Pad operation" should "work correctly" in {
    import com.intel.analytics.bigdl.numeric.NumericFloat
    val input = Tensor(T(
      T(
        T(1f, 2f, 3f),
        T(4f, 5f, 6f)),
      T(
        T(1f, 2f, 3f),
        T(4f, 5f, 6f))
    ))
    val padding = Tensor[Int](T(T(1, 2), T(1, 2), T(1, 2)))
    val expectOutput = Tensor(
      T(
        T(
          T(0f, 0f, 0f, 0f, 0f, 0f),
          T(0f, 0f, 0f, 0f, 0f, 0f),
          T(0f, 0f, 0f, 0f, 0f, 0f),
          T(0f, 0f, 0f, 0f, 0f, 0f),
          T(0f, 0f, 0f, 0f, 0f, 0f)),
        T(
          T(0f, 0f, 0f, 0f, 0f, 0f),
          T(0f, 1f, 2f, 3f, 0f, 0f),
          T(0f, 4f, 5f, 6f, 0f, 0f),
          T(0f, 0f, 0f, 0f, 0f, 0f),
          T(0f, 0f, 0f, 0f, 0f, 0f)),
        T(
          T(0f, 0f, 0f, 0f, 0f, 0f),
          T(0f, 1f, 2f, 3f, 0f, 0f),
          T(0f, 4f, 5f, 6f, 0f, 0f),
          T(0f, 0f, 0f, 0f, 0f, 0f),
          T(0f, 0f, 0f, 0f, 0f, 0f)),
        T(
          T(0f, 0f, 0f, 0f, 0f, 0f),
          T(0f, 0f, 0f, 0f, 0f, 0f),
          T(0f, 0f, 0f, 0f, 0f, 0f),
          T(0f, 0f, 0f, 0f, 0f, 0f),
          T(0f, 0f, 0f, 0f, 0f, 0f)),
        T(
          T(0f, 0f, 0f, 0f, 0f, 0f),
          T(0f, 0f, 0f, 0f, 0f, 0f),
          T(0f, 0f, 0f, 0f, 0f, 0f),
          T(0f, 0f, 0f, 0f, 0f, 0f),
          T(0f, 0f, 0f, 0f, 0f, 0f)))
    )
    val output = Pad[Float, Float](mode = "CONSTANT", 0.0f).forward(T(input, padding))
    output should be(expectOutput)
  }
}
jenniew/BigDL
spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/ops/PadSpec.scala
Scala
apache-2.0
2,313
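The expectOutput tensor in PadSpec above follows from the padding spec: each output dimension is padBefore + inputSize + padAfter. Below is a small sketch of that shape arithmetic, with names invented for illustration rather than taken from the BigDL API.

object PadShapeSketch {
  // Each dimension grows by its (before, after) padding: a 2 x 2 x 3 input padded
  // with (1, 2) on every dimension yields 5 x 5 x 6, matching expectOutput above.
  def paddedShape(inputShape: Seq[Int], paddings: Seq[(Int, Int)]): Seq[Int] =
    inputShape.zip(paddings).map { case (dim, (before, after)) => before + dim + after }

  val example: Seq[Int] = paddedShape(Seq(2, 2, 3), Seq((1, 2), (1, 2), (1, 2))) // Seq(5, 5, 6)
}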
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.carbondata.spark.testsuite.dblocation import org.apache.carbondata.core.constants.CarbonCommonConstants import org.apache.carbondata.core.datastore.impl.FileFactory import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil} import org.apache.spark.sql.{AnalysisException, CarbonEnv, Row} import org.apache.spark.sql.test.util.QueryTest import org.scalatest.BeforeAndAfterAll /** * */ class DBLocationCarbonTableTestCase extends QueryTest with BeforeAndAfterAll { def getMdtFileAndType() = { // if mdt file path is configured then take configured path else take default path val configuredMdtPath = CarbonProperties.getInstance() .getProperty(CarbonCommonConstants.CARBON_UPDATE_SYNC_FOLDER, CarbonCommonConstants.CARBON_UPDATE_SYNC_FOLDER_DEFAULT).trim var timestampFile = configuredMdtPath + "/" + CarbonCommonConstants.SCHEMAS_MODIFIED_TIME_FILE timestampFile = CarbonUtil.checkAndAppendFileSystemURIScheme(timestampFile) val timestampFileType = FileFactory.getFileType(timestampFile) (timestampFile, timestampFileType) } override def beforeAll { sql("drop database if exists carbon cascade") } //TODO fix this test case test("Update operation on carbon table with insert into") { sql("drop database if exists carbon2 cascade") sql(s"create database carbon2 location '$dblocation'") sql("use carbon2") sql("""create table carbontable (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""") sql("insert into carbontable select 'a',1,'aa','aaa'") sql("insert into carbontable select 'b',1,'bb','bbb'") // update operation sql("""update carbontable d set (d.c2) = (d.c2 + 1) where d.c1 = 'a'""").show() sql("""update carbontable d set (d.c2) = (d.c2 + 1) where d.c1 = 'b'""").show() checkAnswer( sql("""select c1,c2,c3,c5 from carbontable"""), Seq(Row("a",2,"aa","aaa"),Row("b",2,"bb","bbb")) ) sql("drop database if exists carbon2 cascade") } test("create and drop database test") { sql(s"create database carbon location '$dblocation'") sql("drop database if exists carbon cascade") } test("create two databases at same table") { sql(s"create database carbon location '$dblocation'") try { sql(s"create database carbon1 location '$dblocation'") } catch { case e: AnalysisException => assert(true) } } test("create table and load data") { sql("drop database if exists carbon cascade") sql(s"create database carbon location '$dblocation'") sql("use carbon") sql("""create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""") sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/dblocation/test.csv' INTO table carbon.carbontable""") checkAnswer(sql("select count(*) from carbontable"), Row(5)) } test("create table and insert data") { sql("drop database if exists 
carbon cascade") sql(s"create database carbon location '$dblocation'") sql("use carbon") sql("""create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""") sql("insert into carbontable select 'a',1,'aa','aaa'") checkAnswer(sql("select count(*) from carbontable"), Row(1)) checkAnswer(sql("select c1 from carbontable"), Seq(Row("a"))) } test("create table and 2 times data load") { sql("drop database if exists carbon cascade") sql(s"create database carbon location '$dblocation'") sql("use carbon") sql("""create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""") sql("insert into carbontable select 'a',1,'aa','aaa'") sql("insert into carbontable select 'b',1,'aa','aaa'") checkAnswer(sql("select count(*) from carbontable"), Row(2)) checkAnswer(sql("select c1 from carbontable"), Seq(Row("a"), Row("b"))) } test("Update operation on carbon table") { sql("drop database if exists carbon1 cascade") sql(s"create database carbon1 location '$dblocation'") sql("use carbon1") sql( """ CREATE TABLE automerge(id int, name string, city string, age int) STORED BY 'org.apache.carbondata.format' """) val testData = s"$resourcesPath/sample.csv" sql(s"LOAD DATA LOCAL INPATH '$testData' into table automerge") // update operation sql("""update carbon1.automerge d set (d.id) = (d.id + 1) where d.id > 2""").show() checkAnswer( sql("select count(*) from automerge"), Seq(Row(6)) ) // sql("drop table carbontable") } test("Delete operation on carbon table") { sql("drop database if exists carbon1 cascade") sql(s"create database carbon1 location '$dblocation'") sql("use carbon1") sql("""create table carbon1.carbontable (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""") sql("insert into carbontable select 'a',1,'aa','aaa'") sql("insert into carbontable select 'b',1,'bb','bbb'") // delete operation sql("""delete from carbontable where c3 = 'aa'""").show checkAnswer( sql("""select c1,c2,c3,c5 from carbon1.carbontable"""), Seq(Row("b",1,"bb","bbb")) ) sql("drop table carbontable") } test("Alter table add column test") { sql("drop database if exists carbon cascade") sql(s"create database carbon location '$dblocation'") sql("use carbon") sql("""create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""") sql("insert into carbontable select 'a',1,'aa','aaa'") sql("insert into carbontable select 'b',1,'bb','bbb'") sql("Alter table carbontable add columns(c4 string) " + "TBLPROPERTIES('DICTIONARY_EXCLUDE'='c4', 'DEFAULT.VALUE.c4'='def')") checkAnswer( sql("""select c1,c2,c3,c5,c4 from carbon.carbontable"""), Seq(Row("a",1,"aa","aaa","def"), Row("b",1,"bb","bbb","def")) ) sql("drop table carbontable") } test("Alter table change column datatype test") { sql("drop database if exists carbon cascade") sql(s"create database carbon location '$dblocation'") sql("use carbon") sql("""create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""") sql("insert into carbontable select 'a',1,'aa','aaa'") sql("insert into carbontable select 'b',1,'bb','bbb'") sql("Alter table carbontable change c2 c2 long") checkAnswer( sql("""select c1,c2,c3,c5 from carbon.carbontable"""), Seq(Row("a",1,"aa","aaa"), Row("b",1,"bb","bbb")) ) sql("drop table carbontable") } test("Alter table change dataType with sort column after adding measure column test"){ sql("drop database if exists carbon cascade") sql(s"create 
database carbon location '$dblocation'") sql("use carbon") sql( """create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) |STORED BY 'org.apache.carbondata.format' |TBLPROPERTIES('SORT_COLUMNS' = 'c2') |""".stripMargin) sql("insert into carbontable select 'a',1,'aa','aaa'") sql("insert into carbontable select 'b',1,'bb','bbb'") sql("Alter table carbontable add columns (c6 int)") sql("Alter table carbontable change c2 c2 bigint") checkAnswer( sql("""select c1,c2,c3,c5 from carbon.carbontable"""), Seq(Row("a",1,"aa","aaa"), Row("b",1,"bb","bbb")) ) sql("drop table carbontable") } test("Alter table change dataType with sort column after adding date datatype with default value test"){ sql("drop database if exists carbon cascade") sql(s"create database carbon location '$dblocation'") sql("use carbon") sql( """create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) |STORED BY 'org.apache.carbondata.format' |TBLPROPERTIES('SORT_COLUMNS' = 'c2') |""".stripMargin) sql("insert into carbontable select 'a',1,'aa','aaa'") sql("insert into carbontable select 'b',1,'bb','bbb'") sql("Alter table carbontable add columns (dateData date) TBLPROPERTIES('DEFAULT.VALUE.dateData' = '1999-01-01')") sql("Alter table carbontable change c2 c2 bigint") checkAnswer( sql("""select c1,c2,c3,c5 from carbon.carbontable"""), Seq(Row("a",1,"aa","aaa"), Row("b",1,"bb","bbb")) ) sql("drop table carbontable") } test("Alter table change dataType with sort column after adding dimension column with default value test"){ sql("drop database if exists carbon cascade") sql(s"create database carbon location '$dblocation'") sql("use carbon") sql( """create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) |STORED BY 'org.apache.carbondata.format' |TBLPROPERTIES('SORT_COLUMNS' = 'c2') |""".stripMargin) sql("insert into carbontable select 'a',1,'aa','aaa'") sql("insert into carbontable select 'b',1,'bb','bbb'") sql("Alter table carbontable add columns (name String) TBLPROPERTIES('DEFAULT.VALUE.name' = 'hello')") sql("Alter table carbontable change c2 c2 bigint") checkAnswer( sql("""select c1,c2,c3,c5,name from carbon.carbontable"""), Seq(Row("a",1,"aa","aaa","hello"), Row("b",1,"bb","bbb","hello")) ) sql("drop table carbontable") } test("Alter table change dataType with sort column after rename test"){ sql("drop database if exists carbon cascade") sql(s"create database carbon location '$dblocation'") sql("use carbon") sql( """create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) |STORED BY 'org.apache.carbondata.format' |TBLPROPERTIES('SORT_COLUMNS' = 'c2') |""".stripMargin) sql("insert into carbontable select 'a',1,'aa','aaa'") sql("insert into carbontable select 'b',1,'bb','bbb'") sql("Alter table carbontable add columns (name String) TBLPROPERTIES('DEFAULT.VALUE.name' = 'hello')") sql("Alter table carbontable rename to carbontable1") sql("Alter table carbontable1 change c2 c2 bigint") checkAnswer( sql("""select c1,c2,c3,c5,name from carbon.carbontable1"""), Seq(Row("a",1,"aa","aaa","hello"), Row("b",1,"bb","bbb","hello")) ) sql("drop table if exists carbontable") sql("drop table if exists carbontable1") } test("Alter table drop column test") { sql("drop database if exists carbon cascade") sql(s"create database carbon location '$dblocation'") sql("use carbon") sql("""create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""") sql("insert into carbontable select 'a',1,'aa','aaa'") sql("insert into carbontable select 
'b',1,'bb','bbb'") sql("Alter table carbontable drop columns(c2)") checkAnswer( sql("""select * from carbon.carbontable"""), Seq(Row("a","aa","aaa"), Row("b","bb","bbb")) ) sql("drop table carbontable") } test("test mdt file path with configured paths") { sql("drop database if exists carbon cascade") sql(s"create database carbon location '$dblocation'") sql("use carbon") CarbonProperties.getInstance() .addProperty(CarbonCommonConstants.CARBON_UPDATE_SYNC_FOLDER, "/tmp/carbondata1/carbondata2/") val (timestampFile, timestampFileType) = getMdtFileAndType() FileFactory.deleteFile(timestampFile, timestampFileType) sql("""create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""") sql("drop table carbontable") // perform file check assert(FileFactory.isFileExist(timestampFile, timestampFileType, true) || CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.isReadFromHiveMetaStore) CarbonProperties.getInstance() .addProperty(CarbonCommonConstants.CARBON_UPDATE_SYNC_FOLDER, CarbonCommonConstants.CARBON_UPDATE_SYNC_FOLDER_DEFAULT) val (timestampFile2, timestampFileType2) = getMdtFileAndType() FileFactory.deleteFile(timestampFile2, timestampFileType2) sql("""create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""") sql("drop table carbontable") // perform file check assert(FileFactory.isFileExist(timestampFile, timestampFileType, true) || CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.isReadFromHiveMetaStore) } override def afterAll { CarbonProperties.getInstance() .addProperty(CarbonCommonConstants.CARBON_UPDATE_SYNC_FOLDER, CarbonCommonConstants.CARBON_UPDATE_SYNC_FOLDER_DEFAULT) sql("use default") sql("drop database if exists carbon cascade") } }
ravipesala/incubator-carbondata
integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dblocation/DBLocationCarbonTableTestCase.scala
Scala
apache-2.0
13,619
// Generated by the Scala Plugin for the Protocol Buffer Compiler.
// Do not edit!
//
// Protofile syntax: PROTO3

package io.scalaland.chimney.examples.pb.order

@SerialVersionUID(0L)
final case class OrderLine(
    item: _root_.scala.Option[io.scalaland.chimney.examples.pb.order.Item] = _root_.scala.None,
    quantity: _root_.scala.Int = 0
)
scalalandio/chimney
protos/src/test/scala/io.scalaland.chimney.examples.pb.order/OrderLine.scala
Scala
apache-2.0
350
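The generated OrderLine above serves as a transformation target in Chimney's example protos. As a rough sketch of the kind of derivation the library provides, using invented case classes rather than anything from the repository:

import io.scalaland.chimney.dsl._

case class DomainItem(name: String, price: Int)
case class ProtoItem(name: String, price: Int)

object ChimneySketch {
  // Field names and types line up, so Chimney derives the mapping automatically.
  val converted: ProtoItem = DomainItem("book", 10).transformInto[ProtoItem]
}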
package im.actor.server.session import scala.concurrent.duration._ import scala.concurrent.{ Await, Future, blocking } import scala.util.Random import akka.actor._ import akka.contrib.pattern.DistributedPubSubExtension import akka.stream.ActorMaterializer import akka.testkit.TestProbe import akka.util.Timeout import com.google.protobuf.ByteString import org.scalatest.concurrent.ScalaFutures import org.scalatest.time.{ Seconds, Span } import org.scalatest.{ FlatSpecLike, Matchers } import slick.driver.PostgresDriver import im.actor.api.rpc.RpcResult import im.actor.api.rpc.codecs._ import im.actor.api.rpc.sequence.{ SeqUpdate, WeakUpdate } import im.actor.server import im.actor.server.api.rpc.service.auth.AuthServiceImpl import im.actor.server.api.rpc.service.sequence.{ SequenceServiceConfig, SequenceServiceImpl } import im.actor.server.api.rpc.{ RpcApiService, RpcResultCodec } import im.actor.server.db.DbExtension import im.actor.server.mtproto.codecs.protocol.MessageBoxCodec import im.actor.server.mtproto.protocol._ import im.actor.server.mtproto.transport.MTPackage import im.actor.server.oauth.{ GoogleProvider, OAuth2GoogleConfig } import im.actor.server.presences.{ GroupPresenceManager, PresenceManager } import im.actor.server.sequence.WeakUpdatesManager import im.actor.server.session.SessionEnvelope.Payload import im.actor.server._ abstract class BaseSessionSpec(_system: ActorSystem = { server.ActorSpecification.createSystem() }) extends server.ActorSuite(_system) with FlatSpecLike with ScalaFutures with Matchers with ActorSerializerPrepare { override implicit def patienceConfig: PatienceConfig = new PatienceConfig(timeout = Span(30, Seconds)) protected implicit val timeout = Timeout(10.seconds) protected implicit val materializer = ActorMaterializer() protected implicit val ec = system.dispatcher protected implicit val db: PostgresDriver.api.Database = DbExtension(_system).db DbExtension(_system).clean() DbExtension(_system).migrate() protected implicit val weakUpdManagerRegion = WeakUpdatesManager.startRegion() protected implicit val presenceManagerRegion = PresenceManager.startRegion() protected implicit val groupPresenceManagerRegion = GroupPresenceManager.startRegion() protected val mediator = DistributedPubSubExtension(system).mediator protected implicit val sessionConfig = SessionConfig.load(system.settings.config.getConfig("session")) Session.startRegion(Some(Session.props(mediator))) protected implicit val sessionRegion = Session.startRegionProxy() protected val oauthGoogleConfig = OAuth2GoogleConfig.load(system.settings.config.getConfig("services.google.oauth")) protected implicit val oauth2Service = new GoogleProvider(oauthGoogleConfig) protected val authService = new AuthServiceImpl(new DummyCodeActivation, mediator) protected val sequenceConfig = SequenceServiceConfig.load.toOption.get protected val sequenceService = new SequenceServiceImpl(sequenceConfig) system.actorOf(RpcApiService.props(Seq(authService, sequenceService)), "rpcApiService") protected def createAuthId(): Long = { val authId = Random.nextLong() Await.result(db.run(persist.AuthId.create(authId, None, None)), 1.second) authId } protected def expectSeqUpdate(authId: Long, sessionId: Long, sendAckAt: Option[Duration] = Some(0.seconds))(implicit probe: TestProbe): SeqUpdate = { val mb = expectMessageBox(authId, sessionId) val update = UpdateBoxCodec.decode(mb.body.asInstanceOf[UpdateBox].bodyBytes).require.value.asInstanceOf[SeqUpdate] sendAckAt map { delay β‡’ Future { blocking { Thread.sleep(delay.toMillis) 
sendMessageBox(authId, sessionId, sessionRegion.ref, Random.nextLong, MessageAck(Vector(mb.messageId))) } } } update } protected def expectWeakUpdate(authId: Long, sessionId: Long)(implicit probe: TestProbe): WeakUpdate = { UpdateBoxCodec.decode(expectMessageBox(authId, sessionId).body.asInstanceOf[UpdateBox].bodyBytes).require.value.asInstanceOf[WeakUpdate] } protected def expectRpcResult(sendAckAt: Option[Duration] = Some(0.seconds), expectAckFor: Set[Long] = Set.empty)(implicit probe: TestProbe, sessionRegion: SessionRegion): RpcResult = { val messages = probe.receiveN(1 + expectAckFor.size).toSet if (messages.size != expectAckFor.size + 1) { fail(s"Expected response and acks for ${expectAckFor.mkString(",")}, got: ${messages.mkString(",")}") } else { val (rest, ackIds) = messages.foldLeft(Vector.empty[(Long, Long, Long, ProtoMessage)], Set.empty[Long]) { case ((rest, ackIds), MTPackage(authId, sessionId, mbBytes)) β‡’ val mb = MessageBoxCodec.decode(mbBytes).require.value mb.body match { case MessageAck(ids) β‡’ (rest, ackIds ++ ids) case body β‡’ (rest :+ ((authId, sessionId, mb.messageId, body)), ackIds) } } ackIds shouldEqual expectAckFor rest match { case Vector((authId, sessionId, messageId, RpcResponseBox(_, rpcResultBytes))) β‡’ sendAckAt map { delay β‡’ Future { blocking { Thread.sleep(delay.toMillis) sendMessageBox(authId, sessionId, sessionRegion.ref, Random.nextLong, MessageAck(Vector(messageId))) } } } RpcResultCodec.decode(rpcResultBytes).require.value case unexpected β‡’ throw new Exception(s"Expected RpcResponseBox but got $unexpected") } } } protected def expectMessageAck(authId: Long, sessionId: Long)(implicit probe: TestProbe): MessageAck = { val mb = expectMessageBox(authId, sessionId) mb.body shouldBe a[MessageAck] val ack = mb.body.asInstanceOf[MessageAck] ack } protected def expectMessageAck(authId: Long, sessionId: Long, messageId: Long)(implicit probe: TestProbe): MessageAck = { val mb = expectMessageBox(authId, sessionId) mb.body shouldBe a[MessageAck] val ack = mb.body.asInstanceOf[MessageAck] ack.messageIds should ===(Vector(messageId)) ack } protected def expectNewSession(authId: Long, sessionId: Long, messageId: Long)(implicit probe: TestProbe, sessionRegion: SessionRegion): NewSession = { expectMessageBoxPF(authId, sessionId) { case mb @ MessageBox(_, NewSession(sid, mid)) if sid == sessionId && mid == messageId β‡’ sendMessageBox(authId, sessionId, sessionRegion.ref, Random.nextLong(), MessageAck(Vector(mb.messageId))) val ns = mb.body.asInstanceOf[NewSession] ns should ===(NewSession(sessionId, messageId)) ns } } protected def ignoreNewSession(authId: Long, sessionId: Long)(implicit probe: TestProbe): Unit = { probe.ignoreMsg { case MTPackage(aid, sid, body) if aid == authId && sid == sessionId β‡’ MessageBoxCodec.decode(body).require.value.body.isInstanceOf[NewSession] case _ β‡’ false } } protected def expectMessageBoxPF[T](authId: Long, sessionId: Long, hint: String = "")(pf: PartialFunction[MessageBox, T])(implicit probe: TestProbe): T = { probe.expectMsgPF() { case MTPackage(aid, sid, body) if aid == authId && sid == sessionId β‡’ val mb = MessageBoxCodec.decode(body).require.value assert(pf.isDefinedAt(mb), s"expected: ${hint} but got ${mb}") pf(mb) } } protected def expectMessageBox(authId: Long, sessionId: Long)(implicit probe: TestProbe): MessageBox = { val packageBody = probe.expectMsgPF() { case MTPackage(aid, sid, body) if aid == authId && sid == sessionId β‡’ body } MessageBoxCodec.decode(packageBody).require.value } protected def 
sendMessageBox(authId: Long, sessionId: Long, session: ActorRef, messageId: Long, body: ProtoMessage)(implicit probe: TestProbe) = sendEnvelope(authId, sessionId, session, Payload.HandleMessageBox(HandleMessageBox(ByteString.copyFrom(MessageBoxCodec.encode(MessageBox(messageId, body)).require.toByteBuffer)))) protected def sendEnvelope(authId: Long, sessionId: Long, session: ActorRef, payload: Payload)(implicit probe: TestProbe) = { session.tell( SessionEnvelope( authId, sessionId ).withPayload(payload), probe.ref ) } }
jamesbond12/actor-platform
actor-server/actor-tests/src/test/scala/im/actor/server/session/BaseSessionSpec.scala
Scala
mit
8,378
package auth

import io.flow.customer.v0.models.Customer
import io.flow.customer.v0.{Client => CustomerClient}
import lib.ResolvedToken

import scala.concurrent.{ExecutionContext, Future}

trait CustomerAuthHelper extends LoggingHelper {

  def customerClient: CustomerClient
  def requestHeadersUtil: RequestHeadersUtil

  private[auth] def getCustomerResolvedToken(
    requestId: String,
    customerNumber: String,
    sessionResolvedTokenOption: Option[ResolvedToken]
  )(implicit ec: ExecutionContext): Future[Option[ResolvedToken]] = {
    sessionResolvedTokenOption.map { t =>
      getCustomerResolvedToken(
        requestId = requestId,
        customerNumber = customerNumber,
        sessionResolvedToken = t
      )
    }.getOrElse(Future.successful(None))
  }

  private[this] def getCustomerResolvedToken(
    requestId: String,
    customerNumber: String,
    sessionResolvedToken: ResolvedToken
  )(implicit ec: ExecutionContext): Future[Option[ResolvedToken]] = {
    sessionResolvedToken.organizationId.map { organizationId =>
      getCustomer(
        requestId = requestId,
        organizationId = organizationId,
        customerNumber = customerNumber
      ).map { customer =>
        Some(
          sessionResolvedToken.copy(
            customerNumber = customer.map(_.number)
          )
        )
      }
    }.getOrElse(Future.successful(None))
  }

  private[this] def getCustomer(
    requestId: String,
    organizationId: String,
    customerNumber: String
  )(implicit ec: ExecutionContext): Future[Option[Customer]] = {
    customerClient.customers.getByNumber(
      organization = organizationId,
      number = customerNumber,
      requestHeaders = requestHeadersUtil.organizationAsSystemUser(
        organizationId = organizationId,
        requestId = requestId
      )
    ).map { customer =>
      Some(customer)
    }.recover {
      case io.flow.customer.v0.errors.UnitResponse(_) => None
      case ex: Throwable => {
        val msg = "Error communicating with customer service"
        log(requestId).
          withKeyValue("customer_number", customerNumber).
          error(msg, ex)
        throw new RuntimeException(msg, ex)
      }
    }
  }
}
flowvault/proxy
app/auth/CustomerAuthHelper.scala
Scala
mit
2,204
package net.quasardb.spark.rdd

import java.sql.Timestamp

import scala.collection.JavaConversions._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

import odelay.Timer

import net.quasardb.qdb._
import net.quasardb.qdb.ts.{Table, Writer, WritableRow}
import net.quasardb.qdb.exception.OperationException
import net.quasardb.spark.connection.QdbConnection
// import net.quasardb.spark.rdd.{DoubleRDD, BlobRDD}

import retry._
import retry.Success

object Util {

  def createCluster(uri: String)
    (implicit securityOptions : Option[Session.SecurityOptions]) : QdbCluster =
    securityOptions match {
      case Some(securityOptions) => new QdbCluster(uri, securityOptions)
      case None => new QdbCluster(uri)
    }

  def createSession(uri: String)
    (implicit securityOptions : Option[Session.SecurityOptions]) : Session =
    securityOptions match {
      case Some(securityOptions) => Session.connect(securityOptions, uri)
      case None => Session.connect(uri)
    }

  def appendRows(
    uri: String,
    table: String,
    values: Iterator[WritableRow])(implicit securityOptions : Option[Session.SecurityOptions]): Unit = {

    implicit val success = Success[Boolean](_ == true)
    implicit val timer = odelay.Timer.default

    val (begin, copy) = values.duplicate

    try {
      val writer : Writer = Table.autoFlushWriter(createSession(uri), table)
      copy.foreach { writer.append(_) }
      writer.flush
    } catch {
      // Thrown in case of race condition
      case e: OperationException => appendRows(uri, table, begin)
    }
  }

  def insertDoubles(
    uri: String,
    table: String,
    column: String,
    values: Iterator[(Timestamp, Double)])(implicit securityOptions : Option[Session.SecurityOptions]): Unit = {

    var collection = new QdbDoubleColumnCollection(column)
    collection.addAll(values.map(DoubleRDD.toJava).toList)

    implicit val success = Success[Boolean](_ == true)
    implicit val timer = odelay.Timer.default

    val future = retry.Backoff(8, 50.millis)(timer) { () =>
      try {
        createCluster(uri)
          .timeSeries(table)
          .insertDoubles(collection)
        Future.successful(true)
      } catch {
        // Thrown in case of race condition
        case e: OperationException => Future.failed(e)
      }
    }

    Await.result(future, 30.second)
  }

  def insertBlobs(
    uri: String,
    table: String,
    column: String,
    values: Iterator[(Timestamp, Array[Byte])])(implicit securityOption : Option[Session.SecurityOptions]): Unit = {

    var collection = new QdbBlobColumnCollection(column)
    collection.addAll(values.map(BlobRDD.toJava).toList)

    implicit val success = Success[Boolean](_ == true)
    implicit val timer = odelay.Timer.default

    val future = retry.Backoff(8, 50.millis)(timer) { () =>
      try {
        createCluster(uri)
          .timeSeries(table)
          .insertBlobs(collection)
        Future.successful(true)
      } catch {
        // Thrown in case of race condition
        case e: OperationException => Future.failed(e)
      }
    }

    Await.result(future, 30.second)
  }
}
bureau14/qdb-spark-connector
src/main/scala/net/quasardb/spark/rdd/Util.scala
Scala
bsd-2-clause
3,224
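insertDoubles and insertBlobs in Util above retry on OperationException with a backoff via the retry and odelay libraries. Below is a self-contained sketch of the same retry-with-backoff idea, written without those libraries and blocking between attempts; all names are illustrative.

import scala.annotation.tailrec
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

object RetrySketch {
  // Runs `op`, retrying up to `maxAttempts` times in total and doubling
  // the delay after every failed attempt.
  @tailrec
  def withBackoff[A](maxAttempts: Int, delay: FiniteDuration)(op: => A): A =
    Try(op) match {
      case Success(value) => value
      case Failure(_) if maxAttempts > 1 =>
        Thread.sleep(delay.toMillis)
        withBackoff(maxAttempts - 1, delay * 2)(op)
      case Failure(error) => throw error
    }
}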
/* Copyright 2009-2018 EPFL, Lausanne */ package inox package solvers package unrolling import utils._ import scala.collection.generic.CanBuildFrom trait Templates extends TemplateGenerator with FunctionTemplates with LambdaTemplates with QuantificationTemplates with EqualityTemplates with TypeTemplates with IncrementalStateWrapper { val program: Program val context: Context protected implicit val semantics: program.Semantics import context._ import program._ import program.trees._ import program.symbols._ implicit val debugSection = DebugSectionSolver type Encoded def asString(e: Encoded): String def abort: Boolean def pause: Boolean def encodeSymbol(v: Variable): Encoded def mkEncoder(bindings: Map[Variable, Encoded])(e: Expr): Encoded def mkSubstituter(map: Map[Encoded, Encoded]): Encoded => Encoded def mkNot(e: Encoded): Encoded def mkOr(es: Encoded*): Encoded def mkAnd(es: Encoded*): Encoded def mkEquals(l: Encoded, r: Encoded): Encoded def mkImplies(l: Encoded, r: Encoded): Encoded def extractNot(e: Encoded): Option[Encoded] def decodePartial(e: Encoded, tpe: Type): Option[Expr] private[unrolling] lazy val trueT = mkEncoder(Map.empty)(BooleanLiteral(true)) private[unrolling] lazy val falseT = mkEncoder(Map.empty)(BooleanLiteral(false)) protected lazy val deferFactor = 3 * options.findOptionOrDefault(optModelFinding) private var currentGen: Int = 0 protected def currentGeneration: Int = currentGen protected def nextGeneration(gen: Int): Int = gen + 3 + deferFactor trait Manager extends IncrementalStateWrapper { def unrollGeneration: Option[Int] def unroll: Clauses def satisfactionAssumptions: Seq[Encoded] def refutationAssumptions: Seq[Encoded] def promoteBlocker(b: Encoded): Boolean } private val managers: Seq[Manager] = Seq( functionsManager, typesManager, equalityManager, lambdasManager, quantificationsManager ) def canUnroll: Boolean = managers.exists(_.unrollGeneration.isDefined) def unroll: Clauses = { assert(canUnroll, "Impossible to unroll further") currentGen = managers.flatMap(_.unrollGeneration).min + 1 reporter.debug("Unrolling generation [" + currentGen + "]") managers.flatMap(_.unroll) } def satisfactionAssumptions = managers.flatMap(_.satisfactionAssumptions) def refutationAssumptions = managers.flatMap(_.refutationAssumptions) // implication tree that we're sure about: if (b1, b2) is in the tree, then // we have the precise semantics of b1 ==> b2 in the resulting clause set private val condImplies = new IncrementalMap[Encoded, Set[Encoded]].withDefaultValue(Set.empty) private val condImplied = new IncrementalMap[Encoded, Set[Encoded]].withDefaultValue(Set.empty) // implication tree that isn't quite ensured in the resulting clause set // this can happen due to defBlocker caching in unrolling private val potImplies = new IncrementalMap[Encoded, Set[Encoded]].withDefaultValue(Set.empty) private val potImplied = new IncrementalMap[Encoded, Set[Encoded]].withDefaultValue(Set.empty) private val condEquals = new IncrementalBijection[Encoded, Set[Encoded]] // Set of variables that have already been declared in this solver private val declared = new IncrementalSet[(Variable, Encoded)] val incrementals: Seq[IncrementalState] = managers ++ Seq( condImplies, condImplied, potImplies, potImplied, condEquals, declared ) protected def freshConds( pathVar: Encoded, condVars: Map[Variable, Encoded], tree: Map[Variable, Set[Variable]]): Map[Encoded, Encoded] = { val subst = condVars.map { case (v, idT) => idT -> encodeSymbol(v) } val mapping = condVars.mapValues(subst) for ((parent, 
children) <- tree) { mapping.get(parent) match { case None => // enabling condition, corresponds to pathVar for (child <- children) { val ec = mapping(child) condImplies += pathVar -> (condImplies(pathVar) + ec) condImplied += ec -> (condImplied(ec) + pathVar) } case Some(ep) => for (child <- children) { val ec = mapping(child) condImplies += ep -> (condImplies(ep) + ec) condImplied += ec -> (condImplied(ec) + ep) } } } subst } private val sym = Variable.fresh("bs", BooleanType(), true) protected def encodeBlockers(bs: Set[Encoded]): (Encoded, Clauses) = bs.toSeq.filter(_ != trueT) match { case Seq(b) if ( condImplies.isDefinedAt(b) || condImplied.isDefinedAt(b) || potImplies.isDefinedAt(b) || potImplied.isDefinedAt(b) || condEquals.containsA(b) ) => (b, Seq.empty) case _ => val flatBs = fixpoint((bs: Set[Encoded]) => bs.flatMap(b => condEquals.getBorElse(b, Set(b))))(bs) condEquals.getA(flatBs) match { case Some(b) => (b, Seq.empty) case None => val b = encodeSymbol(sym) condEquals += (b -> flatBs) (b, Seq(mkEquals(b, if (flatBs.isEmpty) trueT else mkAnd(flatBs.toSeq : _*)))) } } protected def registerImplication(b1: Encoded, b2: Encoded): Unit = { potImplies += b1 -> (potImplies(b1) + b2) potImplied += b2 -> (potImplied(b2) + b1) } protected def blockerEquals(b: Encoded): Set[Encoded] = condEquals.getBorElse(b, Set.empty) protected def blockerParents(b: Encoded, strict: Boolean = true): Set[Encoded] = { condImplied(b) ++ (if (!strict) potImplied(b) else Set.empty) } protected def blockerChildren(b: Encoded, strict: Boolean = true): Set[Encoded] = { condImplies(b) ++ (if (!strict) potImplies(b) else Set.empty) } protected def blockerPath(b: Encoded): Set[Encoded] = blockerPath(Set(b)) /* This set is guaranteed finite and won't expand beyond the limit of a function's * definition as aVar ==> defBlocker is NOT a strict implication (ie. 
won't be in * the condImplied map) */ protected def blockerPath(bs: Set[Encoded]): Set[Encoded] = fixpoint((bs: Set[Encoded]) => bs.flatMap { b => val equal = condEquals.getBorElse(b, Set.empty) if (equal.nonEmpty) equal else (condImplied(b) + b) })(bs).filter(_ != trueT) def promoteBlocker(b: Encoded, force: Boolean = false): Boolean = { var seen: Set[Encoded] = Set.empty var promoted: Boolean = false var blockers: Seq[Set[Encoded]] = Seq(Set(b)) do { val (bs +: rest) = blockers blockers = rest val allBs = bs ++ bs.flatMap(blockerEquals) val next = (for (b <- allBs if !seen(b)) yield { seen += b for (manager <- managers) { val p = manager.promoteBlocker(b) promoted = promoted || p } if (force) { blockerChildren(b, strict = false) } else { Seq.empty[Encoded] } }).flatten if (next.nonEmpty) blockers :+= next } while (!promoted && blockers.nonEmpty) promoted } type Arg = Either[Encoded, Matcher] implicit class ArgWrapper(arg: Arg) { def encoded: Encoded = arg match { case Left(value) => value case Right(matcher) => matcher.encoded } def substitute(substituter: Encoded => Encoded, msubst: Map[Encoded, Matcher]): Arg = arg match { case Left(v) => msubst.get(v) match { case Some(m) => Right(m) case None => Left(substituter(v)) } case Right(m) => Right(m.substitute(substituter, msubst)) } } /** Represents a named function call in the unfolding procedure */ case class Call(tfd: TypedFunDef, args: Seq[Arg], tpSubst: Seq[Arg]) { override def toString: String = { def pArgs(args: Seq[Arg]): String = args.map { case Right(m) => m.toString case Left(v) => asString(v) }.mkString("(", ", ", ")") def rec(tpe: Type, args: Seq[Arg]): String = tpe match { case ft: FunctionType => val (currArgs, nextArgs) = args.splitAt(ft.from.size) pArgs(currArgs) + rec(ft.to, nextArgs) case _ => pArgs(args) } tfd.signature + rec(tfd.getType, args) + pArgs(tpSubst) } def substitute(substituter: Encoded => Encoded, msubst: Map[Encoded, Matcher]): Call = copy( args = args.map(_.substitute(substituter, msubst)), tpSubst = tpSubst.map(_.substitute(substituter, msubst)) ) } /** Represents an application of a first-class function in the unfolding procedure */ case class App(caller: Encoded, tpe: FunctionType, args: Seq[Arg], encoded: Encoded) { override def toString: String = "(" + asString(caller) + " : " + tpe.asString + ")" + args.map(a => asString(a.encoded)).mkString("(", ",", ")") def substitute(substituter: Encoded => Encoded, msubst: Map[Encoded, Matcher]): App = copy( caller = substituter(caller), args = args.map(_.substitute(substituter, msubst)), encoded = substituter(encoded) ) } /** Represents an E-matching matcher that will be used to instantiate relevant quantified propositions */ case class Matcher(key: Either[(Encoded, Type), TypedFunDef], args: Seq[Arg], encoded: Encoded) { override def toString: String = (key match { case Left((c, tpe)) => asString(c) case Right(tfd) => tfd.signature }) + args.map { case Right(m) => m.toString case Left(v) => asString(v) }.mkString("(", ",", ")") def substitute(substituter: Encoded => Encoded, msubst: Map[Encoded, Matcher]): Matcher = copy( key = key.left.map(p => substituter(p._1) -> p._2), args = args.map(_.substitute(substituter, msubst)), encoded = substituter(encoded) ) } /** Represents an equality relation between two instances of a given type */ case class Equality(tpe: Type, e1: Encoded, e2: Encoded) { override def toString: String = s"${asString(e1)} == ${asString(e2)} (of type ${tpe.asString})" def substitute(substituter: Encoded => Encoded): Equality = copy( e1 = 
substituter(e1), e2 = substituter(e2) ) lazy val symbols: (Variable, Encoded) = equalitySymbol(tpe) } /** Template instantiations * * [[Template]] instances, when provided with concrete arguments and a * blocker, will generate three outputs used for program unfolding: * - clauses: clauses that will be added to the underlying solver * - call blockers: bookkeeping information necessary for named * function unfolding * - app blockers: bookkeeping information necessary for first-class * function unfolding * * This object provides helper methods to deal with the triplets * generated during unfolding. */ implicit class MapSetWrapper[A,B](map: Map[A,Set[B]]) { def merge(that: Map[A,Set[B]]): Map[A,Set[B]] = (map.keys ++ that.keys).map { k => k -> (map.getOrElse(k, Set.empty) ++ that.getOrElse(k, Set.empty)) }.toMap def merge(that: (A,B)): Map[A,Set[B]] = map + (that._1 -> (map.getOrElse(that._1, Set.empty) + that._2)) } implicit class MapSeqWrapper[A,B](map: Map[A,Seq[B]]) { def merge(that: Map[A,Seq[B]]): Map[A,Seq[B]] = (map.keys ++ that.keys).map { k => k -> (map.getOrElse(k, Seq.empty) ++ that.getOrElse(k, Seq.empty)).distinct }.toMap def merge(that: (A,B)): Map[A,Seq[B]] = map + (that._1 -> (map.getOrElse(that._1, Seq.empty) :+ that._2)) } /** Abstract templates * * Pre-compiled sets of clauses with extra bookkeeping information that enables * efficient unfolding of function calls and applications. * [[Template]] is a super-type for all such clause sets that can be instantiated * given a concrete argument list and a blocker in the decision-tree. */ type Clauses = Seq[Encoded] type Apps = Map[Encoded, Set[App]] type Calls = Map[Encoded, Set[Call]] type Types = Map[Encoded, Set[Typing]] type Matchers = Map[Encoded, Set[Matcher]] type Equalities = Map[Encoded, Set[Equality]] type Pointers = Map[Encoded, Encoded] object TemplateContents { def empty(pathVar: (Variable, Encoded), args: Seq[(Variable, Encoded)]) = TemplateContents(pathVar, args, Map(), Map(), Map(), Seq(), Map(), Map(), Map(), Map(), Map(), Seq(), Seq(), Map()) } case class TemplateContents( val pathVar : (Variable, Encoded), val arguments : Seq[(Variable, Encoded)], val condVars : Map[Variable, Encoded], val exprVars : Map[Variable, Encoded], val condTree : Map[Variable, Set[Variable]], val clauses : Clauses, val types : Types, val blockers : Calls, val applications : Apps, val matchers : Matchers, val equalities : Equalities, val lambdas : Seq[LambdaTemplate], val quantifications : Seq[QuantificationTemplate], val pointers : Pointers) { lazy val args = arguments.map(_._2) def substitute(substituter: Encoded => Encoded, msubst: Map[Encoded, Matcher]): TemplateContents = TemplateContents( pathVar._1 -> substituter(pathVar._2), arguments, condVars, exprVars, condTree, clauses.map(substituter), types.map { case (b, tps) => substituter(b) -> tps.map(_.substitute(substituter, msubst)) }, blockers.map { case (b, fis) => substituter(b) -> fis.map(_.substitute(substituter, msubst)) }, applications.map { case (b, apps) => substituter(b) -> apps.map(_.substitute(substituter, msubst)) }, matchers.map { case (b, ms) => substituter(b) -> ms.map(_.substitute(substituter, msubst)) }, equalities.map { case (b, eqs) => substituter(b) -> eqs.map(_.substitute(substituter)) }, lambdas.map(_.substitute(substituter, msubst)), quantifications.map(_.substitute(substituter, msubst)), pointers.map(p => substituter(p._1) -> substituter(p._2)) ) def substitution(aVar: Encoded, args: Seq[Arg]): (Clauses, Map[Encoded, Arg]) = substitution(aVar, (this.args zip 
args).toMap + (pathVar._2 -> Left(aVar))) def substitution(aVar: Encoded, substMap: Map[Encoded, Arg]): (Clauses, Map[Encoded, Arg]) = Template.substitution(condVars, exprVars, condTree, types, lambdas, quantifications, pointers, substMap, aVar) def instantiate(substMap: Map[Encoded, Arg]): Clauses = Template.instantiate(clauses, blockers, applications, matchers, equalities, substMap) def merge( condVars : Map[Variable, Encoded], exprVars : Map[Variable, Encoded], condTree : Map[Variable, Set[Variable]], clauses : Clauses, types : Types, blockers : Calls, applications : Apps, matchers : Matchers, equalities : Equalities, lambdas : Seq[LambdaTemplate], quantifications : Seq[QuantificationTemplate], pointers : Pointers ): TemplateContents = TemplateContents( pathVar, arguments, this.condVars ++ condVars, this.exprVars ++ exprVars, this.condTree merge condTree, this.clauses ++ clauses, this.types merge types, this.blockers merge blockers, this.applications merge applications, this.matchers merge matchers, this.equalities merge equalities, this.lambdas ++ lambdas, this.quantifications ++ quantifications, this.pointers ++ pointers ) } trait Template { self => val contents: TemplateContents lazy val start = contents.pathVar._2 def instantiate(aVar: Encoded, args: Seq[Arg]): Clauses = { val (clauses, substMap) = contents.substitution(aVar, args) clauses ++ instantiate(substMap) } protected def instantiate(substMap: Map[Encoded, Arg]): Clauses = { contents.instantiate(substMap) } override def toString : String = "Instantiated template" } /** Semi-template used for inner-template equality * * We introduce a structure here that resembles a [[Template]] that is instantiated * ONCE when the corresponding template becomes of interest. */ class TemplateStructure( /** The normalized expression that is shared between all templates that are "equal". * Template equality is conditioned on [[body]] equality. * * @see [[dependencies]] for the other component of equality */ val body: Expr, /** The closed expressions (independent of the arguments to [[body]]) contained in * the inner-template. Equality is conditionned on equality of [[dependencies]] * (inside the solver). * * @see [[body]] for the other component of equality */ val dependencies: Seq[Encoded], val contents: TemplateContents) { def substitute(substituter: Encoded => Encoded, msubst: Map[Encoded, Matcher]) = new TemplateStructure( body, dependencies.map(substituter), contents.substitute(substituter, msubst) ) /** The [[key]] value (triplet of [[body]], a normalization of the `pathVar` of [[contents]] and [[locals]]) * is used to determine syntactic equality between inner-templates. If the key of two such * templates are equal, then they must necessarily be equal in every model. * * The [[instantiation]] consists of the clause set instantiation (in the sense of * [[Template.instantiate]] that is required for [[dependencies]] to make sense in the solver * (introduces blockers, lambdas, quantifications, etc.) Since [[dependencies]] CHANGE during * instantiation and [[key]] makes no sense without the associated instantiation, the implicit * contract here is that whenever a new key appears during unfolding, its associated * instantiation MUST be added to the set of instantiations managed by the solver. However, if * an identical (or subsuming) pre-existing key has already been found, then the associated * instantiation must already appear in the handled by the solver and the new one can be discarded. 
* * The [[locals]] value consists of the [[dependencies]] on which the substitution resulting * from instantiation has been applied. The [[dependencies]] should not be directly used here * as they may depend on closure and quantifier ids that were only obtained when [[instantiation]] * was computed. * * The [[instantiationSubst]] substitution corresponds that applied to [[dependencies]] when * constructing [[locals]]. */ lazy val (key, instantiation, locals, instantiationSubst) = { val (substClauses, substMap) = contents.substitution(contents.pathVar._2, Map.empty[Encoded, Arg]) val tmplClauses = contents.instantiate(substMap) val instantiation = substClauses ++ tmplClauses val substituter = mkSubstituter(substMap.mapValues(_.encoded)) val deps = dependencies.map(substituter) val key = (body, blockerPath(contents.pathVar._2), deps) val sortedDeps = exprOps.variablesOf(body).toSeq.sortBy(_.id) val locals = sortedDeps zip deps (key, instantiation, locals, substMap.mapValues(_.encoded)) } override def equals(that: Any): Boolean = that match { case (struct: TemplateStructure) => key == struct.key case _ => false } override def hashCode: Int = key.hashCode def subsumes(that: TemplateStructure): Boolean = { key._1 == that.key._1 && key._3 == that.key._3 && key._2.subsetOf(that.key._2) } } object Template { def lambdaPointers(encoder: Expr => Encoded)(expr: Expr): Map[Encoded, Encoded] = { def collectSelectors(expr: Expr, ptr: Expr): Seq[(Expr, Variable)] = expr match { case adt @ ADT(id, tps, es) => (adt.getConstructor.fields zip es).flatMap { case (vd, e) => collectSelectors(e, ADTSelector(ptr, vd.id)) } case Tuple(es) => es.zipWithIndex.flatMap { case (e, i) => collectSelectors(e, TupleSelect(ptr, i + 1)) } case IsTyped(v: Variable, _: FunctionType) => Seq(ptr -> v) case _ => Seq.empty } val pointers = exprOps.collect { case Equals(v @ (_: Variable | _: FunctionInvocation | _: Application), e) => collectSelectors(e, v).toSet case Equals(e, v @ (_: Variable | _: FunctionInvocation | _: Application)) => collectSelectors(e, v).toSet case FunctionInvocation(_, _, es) => es.flatMap(e => collectSelectors(e, e)).toSet case Application(_, es) => es.flatMap(e => collectSelectors(e, e)).toSet case e: Tuple => collectSelectors(e, e).toSet case e: ADT => collectSelectors(e, e).toSet case _ => Set.empty[(Expr, Variable)] } (expr).toMap pointers.map(p => encoder(p._1) -> encoder(p._2)) } def extractCalls( expr: Expr, substMap: Map[Variable, Encoded] = Map.empty[Variable, Encoded], optCall: Option[(TypedFunDef, Seq[Arg])] = None, optApp: Option[App] = None ): (Set[Call], Set[App], Set[Matcher], Pointers) = { val encoder : Expr => Encoded = mkEncoder(substMap) val pointers = lambdaPointers(encoder)(expr) val exprToMatcher = exprOps.fold[Map[Expr, Matcher]] { (expr, res) => val result = res.flatten.toMap result ++ (expr match { case QuantificationMatcher(c, Seq(e1, _)) if c == equalitySymbol(e1.getType)._1 => None case QuantificationMatcher(c, args) => // Note that we rely here on the fact that foldRight visits the matcher's arguments first, // so any Matcher in arguments will belong to the `result` map val encodedArgs = args.map(arg => result.get(arg) match { case Some(matcher) => Right(matcher) case None => Left(encoder(arg)) }) Some(expr -> Matcher(Left(encoder(c) -> c.getType), encodedArgs, encoder(expr))) case FunctionMatcher(tfd, args) => // see comment above val encodedArgs = args.map(arg => result.get(arg) match { case Some(matcher) => Right(matcher) case None => Left(encoder(arg)) }) Some(expr -> 
Matcher(Right(tfd), encodedArgs, encoder(expr))) case _ => None }) }(expr) def encodeArg(arg: Expr): Arg = exprToMatcher.get(arg) match { case Some(matcher) => Right(matcher) case None => Left(encoder(arg)) } val calls = exprOps.collect[FunctionInvocation] { case fi: FunctionInvocation => Set(fi) case _ => Set.empty } (expr).map { case FunctionInvocation(id, tps, args) => val tpVars = tps.flatMap(variableSeq).distinct Call(getFunction(id, tps), args.map(encodeArg), tpVars.map(encodeArg)) }.filter { case Call(tfd, args, _) => !optCall.exists(p => p._1 == tfd && p._2 == args) } val apps = exprOps.collect[Application] { case app: Application => Set(app) case _ => Set.empty } (expr).filter { case Application(c, Seq(e1, e2)) => c != equalitySymbol(e1.getType)._1 case _ => true }.map { case app @ Application(c, args) => val tpe = c.getType.asInstanceOf[FunctionType] App(encoder(c), tpe, args.map(encodeArg), encoder(app)) }.filter(i => Some(i) != optApp) val matchers = exprToMatcher.values.toSet .filter(i => Some(i.encoded) != optApp.map(_.encoded)) .filter { case Matcher(Right(tfd), args, _) => !optCall.exists(p => p._1 == tfd && p._2 == args) case _ => true } (calls, apps, matchers, pointers) } def encode( pathVar: (Variable, Encoded), arguments: Seq[(Variable, Encoded)], tmplClauses: TemplateClauses, substMap: Map[Variable, Encoded] = Map.empty[Variable, Encoded], optCall: Option[TypedFunDef] = None, optApp: Option[(Encoded, FunctionType)] = None ): (Clauses, Calls, Apps, Matchers, Pointers, () => String) = { val (condVars, exprVars, _, guardedExprs, eqs, _, equalities, lambdas, quants) = tmplClauses val idToTrId : Map[Variable, Encoded] = condVars ++ exprVars + pathVar ++ arguments ++ substMap ++ lambdas.map(_.ids) ++ quants.flatMap(_.mapping) ++ equalities.flatMap(_._2.map(_.symbols)) ++ typesManager.tpSubst val encoder: Expr => Encoded = mkEncoder(idToTrId) val optIdCall = optCall.map { tfd => (tfd, arguments.map(p => Left(p._2))) } val optIdApp = optApp.map { case (idT, tpe) => App(idT, tpe, arguments.map(p => Left(p._2)), mkApp(idT, tpe, arguments.map(_._2))) } val (clauses, blockers, applications, matchers, pointers) = { var clauses : Clauses = Seq.empty var blockers : Map[Encoded, Set[Call]] = Map.empty var applications : Map[Encoded, Set[App]] = Map.empty var matchers : Map[Encoded, Set[Matcher]] = Map.empty var pointers : Map[Encoded, Encoded] = Map.empty val pv = pathVar._1 for ((b,es) <- guardedExprs merge Map(pv -> eqs)) { var calls : Set[Call] = Set.empty var apps : Set[App] = Set.empty var matchs : Set[Matcher] = Set.empty val bp = idToTrId(b) for (e <- es) { val (eCalls, eApps, eMatchers, ePtrs) = extractCalls(e, idToTrId, optIdCall, optIdApp) calls ++= eCalls apps ++= eApps matchs ++= eMatchers pointers ++= ePtrs } if (calls.nonEmpty) blockers += bp -> calls if (apps.nonEmpty) applications += bp -> apps if (matchs.nonEmpty) matchers += bp -> matchs } clauses ++= (for ((b,es) <- guardedExprs; e <- es) yield encoder(Implies(b, e))) clauses ++= eqs.map(encoder) (clauses, blockers, applications, matchers, pointers) } val stringRepr : () => String = () => { " * Activating boolean : " + pathVar._1.asString + "\\n" + " * Control booleans : " + condVars.keys.map(_.asString).mkString(", ") + "\\n" + " * Expression vars : " + exprVars.keys.map(_.asString).mkString(", ") + "\\n" + " * Clauses : " + (if (guardedExprs.isEmpty) "\\n" else { "\\n " + (for ((b,es) <- guardedExprs; e <- es) yield (b.asString + " ==> " + e.asString)).mkString("\\n ") + "\\n" }) + " * Invocation-blocks :" + (if 
(blockers.isEmpty) "\\n" else { "\\n " + blockers.map(p => asString(p._1) + " ==> " + p._2).mkString("\\n ") + "\\n" }) + " * Application-blocks :" + (if (applications.isEmpty) "\\n" else { "\\n " + applications.map(p => asString(p._1) + " ==> " + p._2).mkString("\\n ") + "\\n" }) + " * Matchers :" + (if (matchers.isEmpty) "\\n" else { "\\n " + matchers.map(p => asString(p._1) + " ==> " + p._2).mkString("\\n ") + "\\n" }) + " * Lambdas :\\n" + lambdas.map { case template => " +> " + template.toString.split("\\n").mkString("\\n ") + "\\n" }.mkString("\\n") + " * Foralls :\\n" + quants.map { case template => " +> " + template.toString.split("\\n").mkString("\\n ") + "\\n" }.mkString("\\n") } (clauses, blockers, applications, matchers, pointers, stringRepr) } def contents( pathVar: (Variable, Encoded), arguments: Seq[(Variable, Encoded)], tmplClauses: TemplateClauses, substMap: Map[Variable, Encoded] = Map.empty, optCall: Option[TypedFunDef] = None, optApp: Option[(Encoded, FunctionType)] = None ): (TemplateContents, () => String) = { val (condVars, exprVars, condTree, types, equalities, lambdas, quants) = tmplClauses.proj val (clauses, blockers, applications, matchers, pointers, string) = Template.encode( pathVar, arguments, tmplClauses, substMap = substMap, optCall = optCall, optApp = optApp) val contents = TemplateContents( pathVar, arguments, condVars, exprVars, condTree, clauses, types, blockers, applications, matchers, equalities, lambdas, quants, pointers ) (contents, string) } def substitution( condVars: Map[Variable, Encoded], exprVars: Map[Variable, Encoded], condTree: Map[Variable, Set[Variable]], types: Types, lambdas: Seq[LambdaTemplate], quants: Seq[QuantificationTemplate], pointers: Map[Encoded, Encoded], baseSubst: Map[Encoded, Arg], aVar: Encoded ): (Clauses, Map[Encoded, Arg]) = { val freshSubst = exprVars.map { case (v, vT) => vT -> encodeSymbol(v) } ++ freshConds(aVar, condVars, condTree) val matcherSubst = baseSubst.collect { case (c, Right(m)) => c -> m } var subst = freshSubst.mapValues(Left(_)) ++ baseSubst var clauses : Clauses = Seq.empty // We instantiate types before quantifications in order to register functions // before trying to instantiate matchers introduced by quantifications val baseSubstituter = mkSubstituter(subst.mapValues(_.encoded)) for ((b, tps) <- types if !abort; bp = baseSubstituter(b); tp <- tps if !abort) { clauses ++= instantiateType(bp, tp.substitute(baseSubstituter, matcherSubst)) } // /!\\ CAREFUL /!\\ // We have to be wary while computing the lambda subst map since lambdas can // depend on each other. However, these dependencies cannot be cyclic so it // suffices to make sure the traversal order is correct. 
var seen : Set[LambdaTemplate] = Set.empty val lambdaKeys = lambdas.map(lambda => lambda.ids._2 -> lambda).toMap def extractSubst(lambda: LambdaTemplate): Unit = { for { dep <- lambda.closures.map(_._2) flatMap lambdaKeys.get if !seen(dep) } extractSubst(dep) if (!seen(lambda)) { val substMap = subst.mapValues(_.encoded) val substLambda = lambda.substitute(mkSubstituter(substMap), matcherSubst) val (idT, cls) = instantiateLambda(substLambda) subst += lambda.ids._2 -> Left(idT) clauses ++= cls seen += lambda } } for (l <- lambdas) extractSubst(l) // instantiate positive quantifications last to avoid introducing // extra quantifier instantiations that arise due to empty domains val (others, positives) = quants.partition(_.polarity.isNegative) for (q <- others ++ positives) { val substMap = subst.mapValues(_.encoded) val substQuant = q.substitute(mkSubstituter(substMap), matcherSubst) val (map, cls) = instantiateQuantification(substQuant) subst ++= map.mapValues(Left(_)) clauses ++= cls } val substituter = mkSubstituter(subst.mapValues(_.encoded)) for ((ptr, lambda) <- pointers) { registerLambda(substituter(ptr), substituter(lambda)) } (clauses, subst) } def instantiate( clauses: Clauses, calls: Calls, apps: Apps, matchers: Matchers, equalities: Equalities, substMap: Map[Encoded, Arg] ): Clauses = { val substituter : Encoded => Encoded = mkSubstituter(substMap.mapValues(_.encoded)) val msubst = substMap.collect { case (c, Right(m)) => c -> m } val allClauses = new scala.collection.mutable.ListBuffer[Encoded] allClauses ++= clauses.map(substituter) for ((b, fis) <- calls if !abort; bp = substituter(b); fi <- fis if !abort) { allClauses ++= instantiateCall(bp, fi.substitute(substituter, msubst)) } for ((b,fas) <- apps if !abort; bp = substituter(b); fa <- fas if !abort) { allClauses ++= instantiateApp(bp, fa.substitute(substituter, msubst)) } for ((b, matchs) <- matchers if !abort; bp = substituter(b); m <- matchs if !abort) { allClauses ++= instantiateMatcher(bp, m.substitute(substituter, msubst)) } for ((b, eqs) <- equalities if !abort; bp = substituter(b); e <- eqs if !abort) { allClauses ++= instantiateEquality(bp, e.substitute(substituter)) } allClauses.toSeq } } private[this] def instantiate( bindings: Map[Variable, Encoded], gen: (Variable, Encoded) => TemplateClauses ): Clauses = { val start = Variable.fresh("start", BooleanType(), true) val encodedStart = encodeSymbol(start) val tmplClauses = gen(start, encodedStart) val (clauses, calls, apps, matchers, pointers, _) = Template.encode(start -> encodedStart, bindings.toSeq, tmplClauses) val (condVars, exprVars, condTree, types, equalities, lambdas, quants) = tmplClauses.proj val (substClauses, substMap) = Template.substitution( condVars, exprVars, condTree, types, lambdas, quants, pointers, Map.empty, encodedStart) val templateClauses = Template.instantiate(clauses, calls, apps, matchers, equalities, substMap) val allClauses = encodedStart +: (substClauses ++ templateClauses) for (cl <- allClauses) { reporter.debug(" . 
" + cl) } allClauses } def instantiateVariable(v: Variable, bindings: Map[Variable, Encoded]): Clauses = { if (declared contains (v -> bindings(v))) { Seq.empty } else { declared += v -> bindings(v) instantiate(bindings, { (start, encodedStart) => mkClauses(start, v.tpe, v, bindings + (start -> encodedStart))(FreeGenerator) }) } } def instantiateExpr(expr: Expr, bindings: Map[Variable, Encoded]): Clauses = { instantiate(bindings, { (start, encodedStart) => val instExpr = timers.solvers.simplify.run { simplifyFormula(expr) } val tmplClauses = mkClauses(start, instExpr, bindings + (start -> encodedStart), polarity = Some(true)) val tpeClauses = bindings.filterNot(declared contains _).map { case (v, s) => declared += v -> s mkClauses(start, v.tpe, v, bindings + (start -> encodedStart))(FreeGenerator) } tpeClauses.foldLeft(tmplClauses)(_ ++ _) }) } }
romac/inox
src/main/scala/inox/solvers/unrolling/Templates.scala
Scala
apache-2.0
34,118
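The comment in the substitution code above notes that lambdas may depend on each other but never cyclically, so visiting unseen dependencies first is enough to get a valid order. A REPL-style sketch of that traversal pattern in isolation (the Item type and names are illustrative only, not Inox code):

import scala.collection.mutable.ListBuffer

// Illustrative only: process items so that every dependency is handled first,
// mirroring the `seen`-guarded recursion of extractSubst above.
// Assumes the dependency graph is acyclic, as the original comment states.
case class Item(id: String, deps: Set[String])

def processInDependencyOrder(items: Seq[Item]): Seq[String] = {
  val byId  = items.map(i => i.id -> i).toMap
  var seen  = Set.empty[String]
  val order = ListBuffer.empty[String]

  def visit(item: Item): Unit = {
    for (dep <- item.deps.flatMap(byId.get) if !seen(dep.id)) visit(dep)
    if (!seen(item.id)) { seen += item.id; order += item.id }
  }

  items.foreach(visit)
  order.toList
}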
package views.html package user import controllers.routes import lila.api.Context import lila.app.templating.Environment._ import lila.app.ui.ScalatagsTemplate._ import lila.user.User object bots { def apply(users: List[User])(implicit ctx: Context) = { val title = s"${users.size} Online bots" val sorted = users.sortBy { -_.playTime.??(_.total) } views.html.base.layout( title = title, moreCss = frag(cssTag("slist"), cssTag("user.list")), wrapClass = "full-screen-force" )( main(cls := "page-menu bots")( user.bits.communityMenu("bots"), sorted.partition(_.isVerified) match { case (featured, all) => div(cls := "bots page-menu__content")( div(cls := "box bots__featured")( div(cls := "box__top")(h1("Featured bots")), botTable(featured) ), div(cls := "box")( div(cls := "box__top")( h1("Community bots"), a( cls := "bots__about", href := "https://lichess.org/blog/WvDNticAAMu_mHKP/welcome-lichess-bots" )( "About Lichess Bots" ) ), botTable(all) ) ) } ) ) } private def botTable(users: List[User])(implicit ctx: Context) = table(cls := "slist slist-pad")( tbody( users.sortBy { u => (if (u.isVerified) -1 else 1, -u.playTime.??(_.total)) } map { u => tr( td(userLink(u)), u.profile .ifTrue(ctx.noKid) .ifTrue(!u.marks.troll || ctx.is(u)) .flatMap(_.nonEmptyBio) .map { bio => td(shorten(bio, 400)) } | td, ctx.pref.showRatings option td(cls := "rating")(u.best3Perfs.map { showPerfRating(u, _) }), u.playTime.fold(td) { playTime => td( p( cls := "text", dataIcon := "ξ€Ά", st.title := trans.tpTimeSpentPlaying.txt(showPeriod(playTime.totalPeriod)) )(showPeriod(playTime.totalPeriod)), playTime.nonEmptyTvPeriod.map { tvPeriod => p( cls := "text", dataIcon := "ξ€₯", st.title := trans.tpTimeSpentOnTV.txt(showPeriod(tvPeriod)) )(showPeriod(tvPeriod)) } ) }, if (ctx is u) td else { td( a( dataIcon := "", cls := List("button button-empty text" -> true), st.title := trans.challenge.challengeToPlay.txt(), href := s"${routes.Lobby.home}?user=${u.username}#friend" )(trans.play()) ) } ) } ) ) }
luanlv/lila
app/views/user/bots.scala
Scala
mit
2,947
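The bot table above sorts verified accounts first and then by descending total play time; a tiny standalone illustration of that two-level sort key (Bot is a made-up stand-in, not a lila type):

case class Bot(name: String, isVerified: Boolean, playTimeSeconds: Int)

// Verified bots sort before unverified ones; within each group, most play time first.
def sortForTable(bots: List[Bot]): List[Bot] =
  bots.sortBy(b => (if (b.isVerified) -1 else 1, -b.playTimeSeconds))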
package services

import actors.EventStreamActor
import akka.actor.ActorSystem
import akka.stream.Materializer
import com.appliedscala.events.LogRecord
import dao.Neo4JReadDao
import model.ServerSentMessage
import play.api.Configuration

/**
  * Consumes question events, applies them to the Neo4J read model,
  * and pushes the refreshed question list to connected event-stream clients.
  */
class QuestionEventConsumer(neo4JReadDao: Neo4JReadDao, actorSystem: ActorSystem,
    configuration: Configuration, materializer: Materializer,
    readService: ReadService) {

  val topicName = "questions"

  val serviceKafkaConsumer = new ServiceKafkaConsumer(Set(topicName), "read",
    materializer, actorSystem, configuration, handleEvent)

  private def handleEvent(event: String): Unit = {
    val maybeLogRecord = LogRecord.decode(event)
    maybeLogRecord.foreach(adjustReadState)
  }

  private def adjustReadState(logRecord: LogRecord): Unit = {
    neo4JReadDao.handleEvent(logRecord)
    val questionsT = readService.getAllQuestions
    questionsT.foreach { questions =>
      val update = ServerSentMessage.create("questions", questions)
      val esActor = actorSystem.actorSelection(EventStreamActor.pathPattern)
      esActor ! EventStreamActor.DataUpdated(update.json)
    }
  }
}
getArtemUsername/play-and-events
app/services/QuestionEventConsumer.scala
Scala
mit
1,294
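The consumer above follows a decode, update-read-model, broadcast pattern. A rough sketch of that shape with every collaborator reduced to a plain function (all names here are hypothetical, not from the original project):

import scala.concurrent.{ExecutionContext, Future}

class ToyEventConsumer(
    decode: String => Option[String],
    applyToReadModel: String => Unit,
    currentState: () => Future[List[String]],
    publish: List[String] => Unit)(implicit ec: ExecutionContext) {

  // Ignore undecodable events; otherwise fold the event into the read model
  // and push the refreshed state to subscribers.
  def handleEvent(raw: String): Unit =
    decode(raw).foreach { event =>
      applyToReadModel(event)
      currentState().foreach(publish)
    }
}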
package com.glowingavenger.plan.model.action import org.scalatest.{Matchers, FlatSpec} import org.sat4j.scala.Logic._ import com.glowingavenger.plan.model.state.BeliefState class LogicActionSpec extends FlatSpec with Matchers { behavior of "Logic Action" val state = BeliefState('A & 'B & ~'C) val satState = BeliefState(~'A) val action = LogicAction(~'A, 'A) it should "throw an exception when effect or precondition aren't satisfiable" in { an [IllegalArgumentException] should be thrownBy { val action = LogicAction(~'A, 'A & 'B & ~'B) } an [IllegalArgumentException] should be thrownBy { val action = LogicAction(~'A & 'A, 'B) } } it should "compute it's effect and precondition attributes" in { val action = LogicAction('A & ~'B, 'C & 'D) action.attrs should contain theSameElementsAs List('A, 'B, 'C, 'D) } it should "not be applicable when precondition is not satisfied" in { action.applicableIn(state) shouldBe false } it should "be applicable when precondition is satisfied" in { action.applicableIn(satState) shouldBe true } it should "result in the same state when executed in state where precondition is not satisfied" in { action.applicableIn(state) shouldBe false action.result(state) shouldBe state } it should "result in the initial state with applied effect when precondition is satisfied" in { action.applicableIn(satState) shouldBe true action.result(satState) shouldBe BeliefState('A) } it should "treat unknown predicates both in precondition and given state" in { action.applicableIn(BeliefState('A.?)) shouldBe false LogicAction('A?, 'A) applicableIn BeliefState('A?) shouldBe true LogicAction('A?, 'A) applicableIn BeliefState('A) shouldBe true LogicAction('A?, 'A) applicableIn BeliefState(~'A) shouldBe true LogicAction(('A?) & 'B, 'A) applicableIn BeliefState(('A?) & 'B) shouldBe true LogicAction(('A?) & ~'B, 'A) applicableIn BeliefState('A & ~'B) shouldBe true LogicAction(('A?) & ('B?), 'A) applicableIn BeliefState(~'A & ('B?)) shouldBe true } it should "result known predicate in unknown one when effect contains it" in { LogicAction('A, 'A?) result BeliefState('A) shouldBe BeliefState('A?) LogicAction('A, ('A?) & 'B) result BeliefState('A) shouldBe BeliefState(('A?) & 'B) } it should "treat unset predicates as unknown both in precondition and given state" in { LogicAction(~'A, 'A) applicableIn BeliefState('B) shouldBe false LogicAction('A?, 'A) applicableIn BeliefState('B) shouldBe true LogicAction(('A?) & ('B?), 'A) applicableIn BeliefState('A) shouldBe true LogicAction(('A?) & ('B?), 'A) applicableIn BeliefState(~'A) shouldBe true } }
dreef3/glowing-avenger
src/test/scala/com/glowingavenger/plan/model/action/LogicActionSpec.scala
Scala
mit
2,753
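The spec above pins down a three-valued semantics: a definite literal in the precondition must match the state, an unknown literal is always satisfiable, and the effect overwrites assignments. A toy model of just those rules (this is not the sat4j-backed LogicAction, only an illustration):

object ToyLogicAction {
  type Tri   = Option[Boolean]        // Some(true)/Some(false) = known, None = unknown
  type State = Map[Symbol, Tri]

  case class ToyAction(precond: Map[Symbol, Tri], effect: Map[Symbol, Tri]) {
    def applicableIn(s: State): Boolean = precond.forall {
      case (_, None)    => true                        // unknown literal: always applicable
      case (v, Some(b)) => s.get(v).contains(Some(b))  // definite literal: must match the state
    }
    // Applying an action where it is not applicable leaves the state unchanged.
    def result(s: State): State = if (applicableIn(s)) s ++ effect else s
  }
}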
package io.coding.me.m2p2.core.internal.model import org.scalatest.WordSpecLike import org.scalatest.Matchers import java.io.File /** * @author [email protected] */ class MavenMetadataTest extends WordSpecLike with Matchers { "A valid Maven metadata file" should { "not return a failure when parsing a file" in { val mavenMetadata = MavenMetadata(new File(getClass.getResource("/reference_files/maven-metadata.xml").toURI())) mavenMetadata should not be None mavenMetadata.get should not be empty mavenMetadata.get.size shouldBe 4 mavenMetadata.get.head shouldBe MavenArtifact("io.coding-me", "test", "1.4.0-20150723.181352-1", "jar", Some("javadoc")) } } }
coding-me/maven-p2-view
core/src/test/scala/io/coding/me/m2p2/core/internal/model/MavenMetadataTest.scala
Scala
mit
709
package org.workcraft.plugins.cpog.scala

trait View[T]
tuura/workcraft-2.2
CpogsPlugin/src/main/scala/org/workcraft/plugins/cpog/scala/View.scala
Scala
gpl-3.0
62
package com.tribbloids.spookystuff import com.tribbloids.spookystuff.execution.NodeKey import org.apache.spark.rdd.RDD import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema package object row { type Data = Map[Field, Any] //TODO: change to SQL Row implicit val Data: Map.type = Map type SquashedFetchedRDD = RDD[SquashedFetchedRow] type BeaconRDD[K] = RDD[(K, Unit)] type Sampler[T] = Iterable[(T, Int)] => Iterable[(T, Int)] //with index type RowReducer = (Iterable[DataRow], Iterable[DataRow]) => Iterable[DataRow] type RowOrdering = Ordering[(NodeKey, Iterable[DataRow])] // f(open, visited) => open // type RowEliminator = (Iterable[DataRow], Iterable[DataRow]) => Iterable[DataRow] // In every execution plan, the schema: Map(Field -> DataType) has to be created on construction, which enables every Field to be cast into TypedField or IndexedField type IndexedField = (TypedField, Int) type SchemaRow = GenericRowWithSchema }
tribbloid/spookystuff
core/src/main/scala/com/tribbloids/spookystuff/row/package.scala
Scala
apache-2.0
983
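A quick REPL-style illustration of the Sampler alias defined above, which is simply a transformation over (element, index) pairs (the particular predicate chosen here is arbitrary):

import com.tribbloids.spookystuff.row.Sampler

// Keep only the pairs at even positions; any Iterable-to-Iterable
// transformation of (element, index) pairs is a valid Sampler.
val keepEvenPositions: Sampler[String] = _.filter { case (_, idx) => idx % 2 == 0 }

keepEvenPositions(Seq("a" -> 0, "b" -> 1, "c" -> 2))   // keeps ("a", 0) and ("c", 2)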
package eu.henkelmann.actuarius

import collection.immutable.StringOps
import collection.mutable.ListBuffer
import xml.{Group, Node, Text, NodeSeq, Elem => XmlElem, TopScope, XML}
import xml.parsing.XhtmlParser
import util.parsing.combinator.{Parsers, RegexParsers}

/**
 * A parser for the markdown language.
 * Works on pre-parsed lines that can be created by a LineParser.
 */
trait BlockParsers extends Parsers {
    type Elem = MarkdownLine
    //why does this not allow us to access the lookup map in the lookup parser?
    //override type Input = MarkdownLineReader
    //hmm, compiler does not accept this, though MarkdownLineReader extends Reader[MarkdownLine]...

    /**
     * Used to define the output format of parsed blocks and whether verbatim xml blocks are allowed.
     */
    def deco():Decorator = Decorator

    /**
     * returns the current indentation string repeated the given number of levels
     */
    def indent(level:Int):String = deco.indentation * level

    private val tokenizer = new LineTokenizer()

    /** A markdown block element. */
    sealed abstract class MarkdownBlock extends InlineParsers{
        override def deco = BlockParsers.this.deco

        /** adds the resulting xhtml snippet to the given string builder */
        def addResult(level:Int, out:StringBuilder):Unit

        /** returns the resulting xhtml snippet as a string */
        def result():String = {
            val sb = new StringBuilder
            addResult(0, sb)
            sb.toString
        }
    }

    //////////////////////////
    // non-recursive blocks //
    //////////////////////////

    /**
     * Represents a block of verbatim xml
     */
    class VerbatimXml(line:XmlChunk) extends MarkdownBlock {
        def addResult(level:Int, out:StringBuilder) {out.append(this.deco.decorateXml(line.content))}
    }

    /**
     * Represents a horizontal ruler
     */
    object Ruler extends MarkdownBlock {
        def addResult(level:Int, out:StringBuilder) {out.append(indent(level)).append(deco.decorateRuler)}
    }

    /**
     * Represents a header
     */
    case class Header(content:String, headerLevel:Int, lookup:Map[String, LinkDefinition]) extends MarkdownBlock{
        def addResult(level:Int, out:StringBuilder) {
            out.append(indent(level)).append(deco.decorateHeaderOpen(headerLevel))
               .append(applyInline(content, lookup))
               .append(indent(level)).append(deco.decorateHeaderClose(headerLevel))
        }
    }

    /**
     * Represents a block of verbatim quoted code
     */
    class CodeBlock(lines:List[MarkdownLine]) extends MarkdownBlock{
        def addResult(level:Int, out:StringBuilder) {
            out.append(indent(level)).append(deco.decorateCodeBlockOpen)
            for (line <- lines) {
                val escaped = escapeXml(line.payload)
                out.append(escaped).append('\\n')
                //out.append(line.content)
            }
            out.append(indent(level)).append(deco.decorateCodeBlockClose)
        }
    }

    class FencedCodeBlock(language:String, lines:List[MarkdownLine]) extends MarkdownBlock{
        def addResult(level:Int, out:StringBuilder) {
            out.append(indent(level)).append(deco.decorateCodeBlockOpen)
            for (line <- lines) {
                val escaped = escapeXml(line.fullLine)
                out.append(escaped).append('\\n')
                //out.append(line.content)
            }
            out.append(indent(level)).append(deco.decorateCodeBlockClose)
        }
    }

    /**
     * Represents a paragraph of text
     */
    class Paragraph(lines:List[MarkdownLine], lookup:Map[String, LinkDefinition]) extends MarkdownBlock{
        def addResult(level:Int, out:StringBuilder) {
            out.append(indent(level)).append(deco.decorateParagraphOpen)
            addResultPlain(level, out)
            out.append(indent(level)).append(deco.decorateParagraphClose)
        }

        /**
         * Adds the result without any decoration (no wrapping tags).
         * Used for building list items that don't have their content wrapped in paragraphs
         */
        def addResultPlain(level:Int, out:StringBuilder) {
            val temp = new
StringBuilder() lines.foreach(line => temp.append(indent(level)).append(line.payload).append('\\n')) val result = applyInline(temp.toString, lookup) out.append(result) //lines.foreach(line => out.append(indent(level)).append(escapeXml(line.content))) //drop last newline so paragraph closing tag ends the line if (!out.isEmpty && out.charAt(out.length-1) == '\\n') out.deleteCharAt(out.length-1) } } ////////////////////// // recursive blocks // ////////////////////// /** * Represents a quoted text block. Text in the block is recursively evaluated. */ class Blockquote(lines:List[MarkdownLine], lookup:Map[String, LinkDefinition]) extends MarkdownBlock { def addResult(level:Int, out:StringBuilder) { //the block parser needs to recurse: val innerLines = lines.map(line => line.payload) val reader = BlockParsers.this.tokenizer.innerTokenize(innerLines, lookup) //now apply the normal markdown parser to the new content val innerBlocks = BlockParsers.this.applyBlocks(reader) //wrap the resulting blocks in blockquote tags out.append(indent(level)).append(deco.decorateBlockQuoteOpen) innerBlocks.foreach(block => block.addResult(level+1, out)) out.append(indent(level)).append(deco.decorateBlockQuoteClose) } } /** * Helper class to build lists. Allows easy checking if an item ends with empty lines and * recursively builds the content of an item. */ class ListItem(val lines:List[MarkdownLine], lookup:Map[String, LinkDefinition]) extends LineParsers { def endsWithNewline = lines.size > 1 && (lines.last.isInstanceOf[EmptyLine]) def addResult(level:Int, out:StringBuilder, paragraph_? : Boolean) { out.append(indent(level)).append(deco.decorateItemOpen) //the block parser needs to recurse: val innerLines = lines.map(line => line.payload) val reader = BlockParsers.this.tokenizer.innerTokenize(innerLines, lookup) //now apply the normal markdown parser to the new content val innerBlocks = BlockParsers.this.applyBlocks(reader) innerBlocks match { case (p:Paragraph) :: Nil if (!paragraph_?) => p.addResultPlain(level+1, out) case _ => innerBlocks.foreach(block => block.addResult(level+1, out)) } out.append(indent(level)).append(deco.decorateItemClose) } } /** * Base class for ordered and unordered lists, allows for correct handling of paragraphs in lists. */ abstract class ListBlock (items:List[ListItem]) extends MarkdownBlock { /** * This method recursively goes through the given list and adds the items contents. * It checks the previous item if it ends with empty lines. If it does, it signals the * current item to create paragraphs. In order for this method to work it has to be * called with the first item prepended twice in front of the list. So if the list is * a::b::c, call this method with a::a::b::c */ protected def addResult(level:Int, out:StringBuilder, list:List[ListItem]):Unit = list match{ case last::current::rest => { current.addResult(level + 1, out, last.endsWithNewline) addResult(level, out, current::rest) } case _ => {}//end of recursion, list with one item or less } /** * calls recursive handling of nested items */ def addResult(level:Int, out:StringBuilder) { addResult(level, out, items.head::items) } } /** * An ordered (i.e. numbered) list of items. */ class OList (items:List[ListItem]) extends ListBlock(items) { override def addResult(level:Int, out:StringBuilder) { out.append(indent(level)).append(deco.decorateOListOpen) super.addResult(level, out) out.append(indent(level)).append(deco.decorateOListClose) } } /** * An unordered list of items. 
*/ class UList (items:List[ListItem]) extends ListBlock(items) { override def addResult(level:Int, out:StringBuilder) { out.append(indent(level)).append(deco.decorateUListOpen) super.addResult(level, out) out.append(indent(level)).append(deco.decorateUListClose) } } ///////////////////////////////////////////////////////////// //////////////// helpers ///////////////////// ///////////////////////////////////////////////////////////// /** * Parses a line of the given type T */ def line[T](c:Class[T]):Parser[T] = Parser {in => if (in.first.getClass == c) Success(in.first.asInstanceOf[T], in.rest) else Failure("Not a fitting line.", in) } /** * Parses a line of any type *but* T */ def notLine[T](c:Class[T]):Parser[MarkdownLine] = Parser {in => if (in.atEnd) Failure("At end of input.", in) else if (in.first.getClass == c) Failure("Not a fitting line.", in) else Success(in.first, in.rest) } /** * Parses any line. */ def anyLine:Parser[MarkdownLine] = Parser {in => if (in.atEnd) Failure("End of input reached.", in) else Success(in.first, in.rest) } def emptyLine:Parser[EmptyLine] = line(classOf[EmptyLine]) /**accepts zero or more empty lines */ def optEmptyLines:Parser[List[MarkdownLine]] = emptyLine* /** accepts one or more empty lines */ def emptyLines:Parser[List[MarkdownLine]] = emptyLine+ /** returns the current link lookup from the reader * always succeeds, never consumes input */ def lookup:Parser[Map[String, LinkDefinition]] = Parser { in => //why is the instanceof necessary? re-declaring type Input above does not change anything :( Success(in.asInstanceOf[MarkdownLineReader].lookup, in) } /////////////////// // Block parsers // /////////////////// def atxHeader:Parser[Header] = line(classOf[AtxHeaderLine]) ~ lookup ^^ { case l ~ lu => new Header(l.trimHashes, l.headerLevel, lu) } def setExtHeader:Parser[Header] = not(emptyLine) ~> anyLine ~ line(classOf[SetExtHeaderLine]) ~ lookup ^^ {case l ~ setext ~ lu => new Header(l.fullLine.trim, setext.headerLevel, lu)} /** parses a horizontal ruler */ def ruler:Parser[MarkdownBlock] = (line(classOf[RulerLine]) | line(classOf[SetExtHeaderLine])) ^^^ {Ruler} /** parses a verbatim xml block */ def verbatimXml:Parser[VerbatimXml] = line(classOf[XmlChunk]) ^^ {new VerbatimXml(_)} /** parses a code block */ def codeBlock:Parser[CodeBlock] = line(classOf[CodeLine]) ~ ((optEmptyLines ~ line(classOf[CodeLine]))*) ^^ { case l ~ pairs => new CodeBlock( l :: pairs.map({case (a~b) => a++List(b)}).flatten ) } /** * Parses a fenced code block: a line starting a fenced code block with * "```", followed by any lines that do not stop it, optionally followed * by the ending line. Optionally parsing the stopping line causes the * code block to extend to the end of the document. 
(This is the github * behavior, where omitting the line closing the code block causes the * block to extend to the end of the document as well) */ def fencedCodeBlock:Parser[FencedCodeBlock] = (line(classOf[ExtendedFencedCode])|line(classOf[FencedCode])) ~ (notLine(classOf[FencedCode])*) ~ opt(line(classOf[FencedCode]))^^ { case (start:ExtendedFencedCode) ~ lines ~ _ => new FencedCodeBlock(start.languageFormat, lines) case _ ~ lines ~ _ => new FencedCodeBlock("", lines) } //line(classOf[FencedCodeStart]) ~ //((not(line(classOf[FencedCodeEnd]))*) ~ //opt(line(classOf[FencedCodeEnd])) ^^ { // case start ~ lines ~ end => new CodeBlock(lines.map(_.fullLine)) //} /** a consecutive block of paragraph lines * returns the content of the matched block wrapped in <p> tags */ def paragraph:Parser[Paragraph] = lookup ~ (line(classOf[OtherLine])+) ^^ {case lu ~ ls => new Paragraph(ls, lu)} /** * Parses a blockquote fragment: a block starting with a blockquote line followed * by more blockquote or paragraph lines, ends optionally with empty lines */ def blockquoteFragment:Parser[List[MarkdownLine]] = line(classOf[BlockQuoteLine]) ~ ((line(classOf[BlockQuoteLine]) | line(classOf[OtherLine]))*) ~ (optEmptyLines) ^^ { case l ~ ls ~ e => (l :: ls ++ e) } /** * Parses a quoted block. A quoted block starts with a line starting with "> " * followed by more blockquote lines, paragraph lines following blockqoute lines * and may be interspersed with empty lines */ def blockquote:Parser[Blockquote] = lookup ~ (blockquoteFragment+) ^^ { case lu ~ fs => new Blockquote(fs.flatten, lu) } /** * parses a list of lines that may make up the body of a list item */ def itemLines:Parser[List[MarkdownLine]] = ((line(classOf[CodeLine])|line(classOf[OtherLine]))*) /** * The continuation of a list item: * A line indented by four spaces or a tab (a continuation line), followed by more continuation or paragraph * lines followed by empty lines */ def itemContinuation:Parser[List[MarkdownLine]] = optEmptyLines ~ line(classOf[CodeLine]) ~ itemLines ^^ { case e ~ c ~ cs => e ++ (c :: cs) } /**parses an item in an unsorted list */ def uItem:Parser[ListItem] = lookup ~ line(classOf[UItemStartLine]) ~ itemLines ~ (itemContinuation*) ~ optEmptyLines ^^ { case lu ~ s ~ ls ~ cs ~ e => new ListItem(s :: ls ++ cs.flatten ++ e, lu) } /**parses an item in a sorted list */ def oItem:Parser[ListItem] = lookup ~ line(classOf[OItemStartLine]) ~ itemLines ~ (itemContinuation*) ~ optEmptyLines ^^ { case lu ~ s ~ ls ~ cs ~ e => new ListItem(s :: ls ++ cs.flatten ++ e, lu) } /** parses an unordered list */ def uList:Parser[UList] = (uItem+) ^^ {new UList(_)} /** parses an ordered list */ def oList:Parser[OList] = (oItem+) ^^ {new OList(_)} /////////////////////////////////////////////////////////////// /////////////////// high level processing ///////////////////// /////////////////////////////////////////////////////////////// /** * parses first level blocks (all blocks, including xml) */ def outerBlock:Parser[MarkdownBlock] = (verbatimXml <~ optEmptyLines) | innerBlock /** * speed up block processing by looking ahead */ def fastBlock:Parser[MarkdownBlock] = Parser { in => if (in.atEnd) { Failure("End of Input.", in) } else { in.first match { case l:AtxHeaderLine => atxHeader(in) case l:RulerLine => ruler(in) //setext headers have been processed before we are called, so this is safe case l:SetExtHeaderLine => ruler(in) case l:CodeLine => codeBlock(in) case l:ExtendedFencedCode => fencedCodeBlock(in) case l:FencedCode => fencedCodeBlock(in) case 
l:BlockQuoteLine => blockquote(in)
                case l:OItemStartLine  => oList(in)
                case l:UItemStartLine  => uList(in)
                case _                 => paragraph(in)
            }
        }
    }

    /**
     * parses inner blocks (everything excluding xml)
     */
    def innerBlock:Parser[MarkdownBlock] = (setExtHeader | fastBlock) <~ optEmptyLines

    /**
     * a markdown parser
     */
    def markdown:Parser[List[MarkdownBlock]] = optEmptyLines ~> (outerBlock*)

    /** Generic apply method to run one of our parsers on the given input. */
    def apply[T](p:Parser[T], in:MarkdownLineReader):T = {
        phrase(p)(in) match {
            case Success(t, _) => t
            case e: NoSuccess => throw new IllegalArgumentException("Could not parse '" + in + "': " + e)
        }
    }

    /** parses all blocks from the given reader */
    def applyBlocks(in:MarkdownLineReader):List[MarkdownBlock] = apply((optEmptyLines ~> (innerBlock*)), in)

    /** Generic apply method to test a single parser */
    def apply[T](p:Parser[T], list:List[MarkdownLine]):T = apply(p, new MarkdownLineReader(list))

    /** Parses the given input as a markdown document and returns the string result */
    def apply(in:MarkdownLineReader):String = {
        phrase(markdown)(in) match {
            case Success(bs, _) => {
                val builder = new StringBuilder()
                bs.foreach(block => block.addResult(0, builder))
                builder.toString
            }
            case e: NoSuccess => throw new IllegalArgumentException("Could not parse " + in + ": " + e)
        }
    }
}
KimStebel/actuarius
src/main/scala/eu/henkelmann/actuarius/BlockParsers.scala
Scala
bsd-3-clause
17,815
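BlockParsers builds on scala-parser-combinators with MarkdownLine as the token type, and its line(classOf[...]) helper accepts exactly one token of a given runtime class. A stripped-down, self-contained imitation of that technique over a made-up Line hierarchy (none of these names come from Actuarius):

import scala.util.parsing.combinator.Parsers
import scala.util.parsing.input.{NoPosition, Position, Reader}

object ToyLineParsers extends Parsers {
  sealed trait Line { def payload: String }
  case class HeaderLine(payload: String) extends Line
  case class TextLine(payload: String) extends Line

  type Elem = Line

  // A minimal reader over a pre-tokenized list of lines.
  final class LineReader(lines: List[Line]) extends Reader[Line] {
    def first: Line = lines.head
    def rest: Reader[Line] = new LineReader(lines.tail)
    def atEnd: Boolean = lines.isEmpty
    def pos: Position = NoPosition
  }

  // Accept a single token of the requested class, like BlockParsers.line above.
  def line[T <: Line](c: Class[T]): Parser[T] = Parser { in =>
    if (!in.atEnd && in.first.getClass == c) Success(in.first.asInstanceOf[T], in.rest)
    else Failure("Not a fitting line.", in)
  }

  def header: Parser[String]    = line(classOf[HeaderLine]) ^^ (_.payload)
  def paragraph: Parser[String] = rep1(line(classOf[TextLine])) ^^ (_.map(_.payload).mkString(" "))
  def document: Parser[List[String]] = rep(header | paragraph)
}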
package org.odfi.indesign.core.module.ui.www.layout import java.net.URI import org.odfi.indesign.core.module.ui.www.external.ExternalBuilder import com.idyria.osi.vui.html.HTMLNode import org.w3c.dom.html.HTMLElement trait UILayoutBuilder extends ExternalBuilder { override def externalAdd(targetNode: HTMLNode[HTMLElement, Any]): Unit = { super.externalAdd(targetNode) switchToNode(targetNode, { var n = script(new URI(createSpecialPath("resources", "modules/wwwui/indesign-layout.js"))) { } }) } }
opendesignflow/indesign
indesign-wwwui/src/main/scala/org/odfi/indesign/core/module/ui/www/layout/UILayoutBuilder.scala
Scala
gpl-3.0
544
/** * scala-relaxng * For all details and documentation: * http://github.com/inkling/scala-relaxng * * Copyright 2011 Inkling Systems, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.inkling.relaxng.test import scala.util.parsing.combinator._ import scala.util.parsing.input._ import scala.text.Document import java.io.StringWriter import java.net.URI import org.scalatest._ import org.scalatest.prop._ import org.scalacheck.{Gen, Prop, Arbitrary} import org.scalacheck.Gen._ import org.scalacheck.Prop._ import org.scalacheck.Arbitrary._ /** * A spec class to more fluidly support using spec and checkers together */ class CheckSpec extends Spec with Checkers { /** Little wrapper because I never "it" without a "check" */ def checkit(description: String)(prop: =>Prop) { it(description) { check(prop) } } }
inkling/scala-relaxng
src/test/scala/CheckSpec.scala
Scala
apache-2.0
1,359
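A possible use of the checkit helper above, pairing the spec style with a ScalaCheck property; the spec class and property below are invented for illustration and assume the same test package as CheckSpec:

package com.inkling.relaxng.test

import org.scalacheck.Prop

class ReverseSpec extends CheckSpec {
  // checkit registers the test via "it" and runs the property via "check".
  checkit("reversing a list twice yields the original list") {
    Prop.forAll { (xs: List[Int]) => xs.reverse.reverse == xs }
  }
}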
/** * Copyright: Copyright (C) 2016, Jaguar Land Rover * License: MPL-2.0 */ package org.genivi.sota.core import akka.actor.ActorSystem import akka.event.Logging import akka.http.scaladsl.util.FastFuture import cats.Show import eu.timepit.refined.string._ import org.genivi.sota.common.DeviceRegistry import org.genivi.sota.core.data._ import org.genivi.sota.core.db._ import org.genivi.sota.core.resolver.Connectivity import org.genivi.sota.core.transfer.UpdateNotifier import org.genivi.sota.data.{Namespace, PackageId, Uuid} import java.time.Instant import java.util.UUID import org.genivi.sota.core.db.UpdateSpecs.UpdateSpecRow import scala.collection.immutable.ListSet import scala.concurrent.{ExecutionContext, Future} import scala.util.control.NoStackTrace import slick.dbio.DBIO import slick.driver.MySQLDriver.api._ case class PackagesNotFound(packageIds: (PackageId)*) (implicit show: Show[PackageId]) extends Throwable(s"""Package(s) not found: ${packageIds.map(show.show).mkString(", ")}""") with NoStackTrace case class UploadConf( chunkSize: Int, downloadSplitStrategy: Set[Package] => Vector[Download] ) object UploadConf { implicit val default = UploadConf( chunkSize = 64 * 1024, downloadSplitStrategy = packages => { packages.map(p => Download.apply(Vector(p))).toVector } ) } class UpdateService(notifier: UpdateNotifier, deviceRegistry: DeviceRegistry) (implicit val system: ActorSystem, val connectivity: Connectivity, val ec: ExecutionContext) { import UpdateService._ implicit private val log = Logging(system, "updateservice") def checkDevices( dependencies: DeviceToPackageIds ) : Future[Boolean] = FastFuture.successful( true ) /** * Fetch from DB the [[Package]]s corresponding to the given [[PackageId]]s, * failing in case not all could be fetched. */ def fetchPackages(ns: Namespace, requirements: Set[PackageId] ) (implicit db: Database, ec: ExecutionContext): Future[Seq[Package]] = { def missingPackages( required: Set[PackageId], found: Seq[Package] ) : Set[PackageId] = { val result = required -- found.map( _.id ) if( result.nonEmpty ) log.debug( s"Some of required packages not found: $result" ) result } for { foundPackages <- db.run(Packages.byIds(ns, requirements)) mapping <- if( requirements.size == foundPackages.size ) { FastFuture.successful( foundPackages ) } else { FastFuture.failed( PackagesNotFound( missingPackages(requirements, foundPackages).toArray: _*)) } } yield mapping } def fetchUpdateSpecRows(id: Uuid)(implicit db: Database, ec: ExecutionContext): Future[Seq[UpdateSpecRow]] = db.run(UpdateSpecs.listUpdatesById(id)) def loadPackage(id: UUID) (implicit db: Database, ec: ExecutionContext): Future[Package] = { val dbIO = Packages.byUuid(id).flatMap(BlacklistedPackages.ensureNotBlacklisted) db.run(dbIO) } /** * For each of the given (VIN, dependencies) prepare an [[UpdateSpec]] * that points to the given [[UpdateRequest]] and has [[UpdateStatus]] "Pending". * <p> * No install order is specified for the single [[UpdateSpec]] that is prepared per VIN. * However, a timestamp is included in each [[UpdateSpec]] to break ties * with any other (already persisted) [[UpdateSpec]]s that might be pending. 
* * @param vinsToPackageIds several VIN-s and the dependencies for each of them * @param idsToPackages lookup a [[Package]] by its [[PackageId]] */ def mkUpdateSpecs(request: UpdateRequest, vinsToPackageIds: DeviceToPackageIds, idsToPackages: Map[PackageId, Package]): Set[UpdateSpec] = { vinsToPackageIds.map { case (device, requiredPackageIds) => UpdateSpec.default(request, device).copy(dependencies = requiredPackageIds.map(idsToPackages)) }.toSet } def persistRequest(request: UpdateRequest, updateSpecs: Set[UpdateSpec]) (implicit db: Database, ec: ExecutionContext) : Future[Unit] = { val updateReqIO = UpdateRequests.persist(request) val updateSpecsIO = DBIO.sequence(updateSpecs.map(UpdateSpecs.persist).toSeq) val dbIO = updateReqIO.andThen(updateSpecsIO).map(_ => ()) db.run(dbIO.withPinnedSession.transactionally) } /** * <ul> * <li>For the [[Package]] of the given [[UpdateRequest]] find the vehicles where it needs to be installed,</li> * <li>For each such VIN create an [[UpdateSpec]]</li> * <li>Persist in DB all of the above</li> * </ul> */ def queueUpdate(namespace: Namespace, request: UpdateRequest, resolver: DependencyResolver ) (implicit db: Database, ec: ExecutionContext): Future[Set[UpdateSpec]] = { for { pckg <- loadPackage(request.packageUuid) vinsToDeps <- resolver(pckg) requirements = allRequiredPackages(vinsToDeps) packages <- fetchPackages(namespace, requirements) _ <- db.run(BlacklistedPackages.ensureNotBlacklistedIds(namespace)(packages.map(_.id))) idsToPackages = packages.map( x => x.id -> x ).toMap updateSpecs = mkUpdateSpecs(request, vinsToDeps, idsToPackages) _ <- persistRequest(request, updateSpecs) _ <- Future.successful(notifier.notify(updateSpecs.toSeq)) } yield updateSpecs } def allRequiredPackages(deviceToDeps: Map[Uuid, Set[PackageId]]): Set[PackageId] = { log.debug(s"Dependencies from resolver: $deviceToDeps") deviceToDeps.values.flatten.toSet } def updateRequest(ns: Namespace, packageId: PackageId) (implicit db: Database, ec: ExecutionContext): Future[UpdateRequest] = db.run(Packages.byId(ns, packageId).flatMap(BlacklistedPackages.ensureNotBlacklisted)).map { p => val newUpdateRequest = UpdateRequest.default(ns, p.uuid) newUpdateRequest.copy(signature = p.signature.getOrElse(newUpdateRequest.signature), description = p.description) } /** * For the given [[PackageId]] and vehicle, persist a fresh [[UpdateRequest]] and a fresh [[UpdateSpec]]. * Resolver is not contacted. */ def queueDeviceUpdate(ns: Namespace, device: Uuid, packageId: PackageId) (implicit db: Database, ec: ExecutionContext): Future[(UpdateRequest, UpdateSpec, Instant)] = { for { updateRequest <- updateRequest(ns, packageId) spec = UpdateSpec.default(updateRequest, device) _ <- persistRequest(updateRequest, ListSet(spec)) } yield (updateRequest, spec, spec.updateTime) } def all(namespace: Namespace)(implicit db: Database, ec: ExecutionContext): Future[Seq[UpdateRequest]] = db.run(UpdateRequests.list(namespace)) } object UpdateService { type DeviceToPackageIds = Map[Uuid, Set[PackageId]] type DependencyResolver = Package => Future[DeviceToPackageIds] }
PDXostc/rvi_sota_server
core/src/main/scala/org/genivi/sota/core/UpdateService.scala
Scala
mpl-2.0
7,070
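fetchPackages above succeeds only when every required PackageId resolves to a stored Package; the same check, reduced to plain set arithmetic without Slick or Futures, as a REPL-style sketch with generic, purely illustrative names:

// Returns the resolved packages, or the set of identifiers that could not be found.
def resolveRequired[Id, Pkg](required: Set[Id], found: Seq[Pkg])(idOf: Pkg => Id): Either[Set[Id], Seq[Pkg]] = {
  val missing = required -- found.map(idOf)
  if (missing.isEmpty) Right(found) else Left(missing)
}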
package mesosphere.marathon.metrics import java.util.concurrent.TimeUnit import java.util.concurrent.atomic.AtomicInteger import com.codahale.metrics.{Gauge, MetricRegistry} import com.google.inject.Inject import mesosphere.marathon.metrics.Metrics.{ Histogram, Meter, Timer, Counter } import org.aopalliance.intercept.MethodInvocation import scala.collection.concurrent.TrieMap import scala.concurrent.Future import scala.concurrent.duration.FiniteDuration import scala.util.control.NonFatal /** * Utils for timer metrics collection. */ class Metrics @Inject() (val registry: MetricRegistry) { private[this] val classNameCache = TrieMap[Class[_], String]() def timed[T](name: String)(block: => T): T = { val timer = registry.timer(name) val startTime = System.nanoTime() try { block } finally { timer.update(System.nanoTime() - startTime, TimeUnit.NANOSECONDS) } } def counter(name: String): Counter = { new Counter(registry.counter(name)) } def timer(name: String): Timer = { new Timer(registry.timer(name)) } def meter(name: String): Meter = { new Meter(registry.meter(name)) } def histogram(name: String): Histogram = { new Histogram(registry.histogram(name)) } @throws[IllegalArgumentException]("if this function is called multiple times for the same name.") def gauge[G <: Gauge[_]](name: String, gauge: G): G = { registry.register(name, gauge) gauge } def name(prefix: String, clazz: Class[_], method: String): String = { s"${prefix}.${className(clazz)}.${method}" } def name(prefix: String, in: MethodInvocation): String = { name(prefix, in.getThis.getClass, in.getMethod.getName) } def className(clazz: Class[_]): String = { classNameCache.getOrElseUpdate(clazz, stripGuiceMarksFromClassName(clazz)) } private[metrics] def stripGuiceMarksFromClassName(clazz: Class[_]): String = { val name = clazz.getName if (name.contains("$EnhancerByGuice$")) clazz.getSuperclass.getName else name } } object Metrics { class Counter(counter: com.codahale.metrics.Counter) { def inc(): Unit = counter.inc() def dec(): Unit = counter.dec() } class Timer(private[metrics] val timer: com.codahale.metrics.Timer) { def timeFuture[T](future: => Future[T]): Future[T] = { val startTime = System.nanoTime() val f = try future catch { case NonFatal(e) => timer.update(System.nanoTime() - startTime, TimeUnit.NANOSECONDS) throw e } import mesosphere.util.CallerThreadExecutionContext.callerThreadExecutionContext f.onComplete { case _ => timer.update(System.nanoTime() - startTime, TimeUnit.NANOSECONDS) } f } def apply[T](block: => T): T = { val startTime = System.nanoTime() try { block } finally { timer.update(System.nanoTime() - startTime, TimeUnit.NANOSECONDS) } } def update(duration: FiniteDuration): Unit = timer.update(duration.toMillis, TimeUnit.MILLISECONDS) def invocationCount: Long = timer.getCount } class Histogram(histogram: com.codahale.metrics.Histogram) { def update(value: Long): Unit = { histogram.update(value) } def update(value: Int): Unit = { histogram.update(value) } } class Meter(meter: com.codahale.metrics.Meter) { def mark(): Unit = meter.mark() def mark(n: Long): Unit = meter.mark(n) def mark(n: Int): Unit = meter.mark(n.toLong) } class AtomicIntGauge extends Gauge[Int] { private[this] val value_ = new AtomicInteger(0) def setValue(l: Int): Unit = value_.set(l) override def getValue: Int = value_.get() def increment(): Int = value_.incrementAndGet() def decrement(): Int = value_.decrementAndGet() } }
ss75710541/marathon
src/main/java/mesosphere/marathon/metrics/Metrics.scala
Scala
apache-2.0
3,840
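A small REPL-style usage sketch for the Metrics wrapper above, built directly against a Dropwizard MetricRegistry (the metric names are arbitrary):

import com.codahale.metrics.MetricRegistry
import mesosphere.marathon.metrics.Metrics

val metrics = new Metrics(new MetricRegistry)

// Time a synchronous block; the elapsed nanoseconds land in the "db.query" timer.
val answer = metrics.timed("db.query") { 21 * 2 }

// Counters and meters are thin wrappers over their Dropwizard counterparts.
metrics.counter("requests.total").inc()
metrics.meter("requests.rate").mark()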
package edu.umass.ciir.strepsi.distribution import edu.umass.ciir.strepsi.{LogTools, SeqTools} /** * User: dietz * Date: 8/30/13 * Time: 12:09 PM */ case class Distribution[Elem](nonzeroDistr: Seq[(Elem, Double)], zeroEntries: Int = 0, zeroEntryNames: PartialFunction[Int, Elem] = Seq.empty, zeroCountProb: Double = 0.0, isLog: Boolean = false ) extends CategoricalCompressedDistribution[Elem] { type concreteType = Distribution[Elem] assert(nonzeroDistr.map(_._1).distinct.length == nonzeroDistr.length, { "input for nonzeroDistr is expected to only contain unique elements. But distinct elements=" + nonzeroDistr.map(_._1).distinct.length + " num elements=" + nonzeroDistr.length + ". NonZeroDistr = " + nonzeroDistr.toList }) if (zeroEntries > 0) assert(zeroEntryNames.isDefinedAt(zeroEntries - 1), { "zeroEntryNames are not defined at position " + (zeroEntries - 1) + " zeroEntryNames=" + (0 until zeroEntries).map(zeroEntryNames).toSeq }) def nonzeroHashView: Map[Elem, Double] = nonzeroDistr.toMap def builtNonZeroDistrFrom(distr: Seq[(Elem, Double)], zeroEntries: Int, zeroEntryNames: PartialFunction[Int, Elem], zeroCountProb: Double, isLog: Boolean ) = { Distribution(distr, zeroEntries, zeroEntryNames, zeroCountProb, isLog) } def builtDistrFrom(distr: Seq[(Elem, Double)], isLog: Boolean) = { Distribution(nonzeroDistr = distr, 0, zeroEntryNames, zeroCountProb, isLog) } override def toString = { "Distribution(" + nonzeroDistr.sortBy(-_._2).toString + " " + (0 until zeroEntries).map(zeroEntryNames.applyOrElse(_, "??") -> zeroCountProb).toString + ")" } } /** * User: dietz * Date: 8/14/13 * Time: 5:16 PM */ trait CategoricalCompressedDistribution[Elem] { assert(!(nonzeroDistr.isEmpty && zeroEntries == 0), { "Can't instantiate empty distribution" }) type concreteType <: CategoricalCompressedDistribution[Elem] def builtDistrFrom(distr: Seq[(Elem, Double)], isLog: Boolean): concreteType def builtNonZeroDistrFrom(distr: Seq[(Elem, Double)], zeroEntries: Int, zeroEntryNames: PartialFunction[Int, Elem], zeroCountProb: Double, isLog: Boolean ): concreteType def nonzeroDistr: Seq[(Elem, Double)] def nonzeroHashView: Map[Elem, Double] def zeroEntries: Int def zeroEntryNames: PartialFunction[Int, Elem] def zeroCountProb: Double def isLog: Boolean def distr = { nonzeroDistr ++ (0 until zeroEntries).map(zeroEntryNames(_) -> zeroCountProb) } def universe: Iterable[Elem] = { nonzeroDistr.map(_._1) ++ (0 until zeroEntries).map(zeroEntryNames) } def topK(k: Int): concreteType = { val nonzeroPart = SeqTools.topK(nonzeroDistr, k) val numZeros = math.min(k - nonzeroPart.length, zeroEntries) builtNonZeroDistrFrom(nonzeroPart, numZeros, zeroEntryNames, zeroCountProb, isLog) } def normalize: concreteType = { if (isLog) logExpSumNormalize else divideByMarginal } def divideByMarginal: concreteType = { val marg = marginal builtNonZeroDistrFrom(nonzeroDistr.map(elem => (elem._1, elem._2 / marg)), zeroEntries, zeroEntryNames, zeroCountProb / marg, isLog) } def logExpSumNormalize: concreteType = { builtDistrFrom(LogTools.logExpSumNormalizeBase(distr), isLog = false) } def getArgMax: (Elem, Double) = { val nonZeroEntry = nonzeroDistr.maxBy(_._2) if (nonZeroEntry._2 < zeroCountProb && zeroEntries > 0) { (zeroEntryNames(0) -> zeroCountProb) } else { nonZeroEntry } } def mean: Double = { 1.0 * nonzeroDistr.map(_._2).sum / nonzeroDistr.length } def marginal: Double = { assert(!isLog) assert(nonzeroDistr.forall(_._2 >= 0) || zeroCountProb > 0.0 && zeroEntries > 0) nonzeroDistr.view.map(_._2).sum + zeroEntries * zeroCountProb } /** * * 
@param random a random number between 0 and marginal * @return */ def draw(random: Double): Elem = { if (nonzeroDistr.isEmpty && zeroEntries == 0) throw new RuntimeException("can't draw from a CategoricalDistribution with no elements") if (random < 0.0) throw new RuntimeException("random seed must be >= 0.0") if (random > marginal) throw new RuntimeException("random seed must be <= this.marginal") def op(thread: (Option[Elem], Double), entry: (Elem, Double) ): (Option[Elem], Double) = (Some(entry._1), thread._2 + entry._2) val virtualFullDistr = nonzeroDistr.view ++ (0 until zeroEntries).view.map(zeroEntryNames(_) -> zeroCountProb) val scanned: Iterable[(Option[Elem], Double)] = virtualFullDistr.scanLeft[(Option[Elem], Double), Iterable[(Option[Elem], Double)]]((None, 0.0))(op) val elem = scanned.dropWhile(_._2 <= random).headOption match { case Some((Some(elem), _)) => elem case None => { virtualFullDistr.last._1 // happens if random == marg, even for zeroEntries=0, then return the last element as well } case Some((None, _)) => { // this should not happen throw new RuntimeException("Program error: did not find any element in " + virtualFullDistr + " that represents cummulative probability " + random + ".") } } elem } def expectedCounts(expectedLength: Double): Map[Elem, Int] = { assert(!isLog) (for (elem <- universe) yield { val mass = prob(elem) * expectedLength elem -> Distribution.expectation2count(mass) }).toMap } def size: Int = { nonzeroHashView.size + zeroEntries } def prob(elem: Elem): Double = { val p = nonzeroHashView.get(elem) match { case Some(value) => value case None => zeroCountProb } if (isLog) math.exp(p) else p } def head: (Elem, Double) = { if (zeroEntries > 0) zeroEntryNames(0) -> zeroCountProb else { nonzeroDistr.head } } def logarithmize: CategoricalCompressedDistribution[Elem] = { builtNonZeroDistrFrom(distr.map(entry => (entry._1, math.log(entry._2))), zeroEntries, zeroEntryNames, math.log(zeroCountProb), isLog = true) } } object Distribution { def singletonDistr[Elem](elem: Elem): Distribution[Elem] = { Distribution(Seq(elem -> 1.0), isLog = false) } def unionUniverse[Elem](weightedDistrs: Distribution[Distribution[Elem]]): IndexedSeq[Elem] = { weightedDistrs.universe.flatMap(_.universe).toSet.toIndexedSeq } def sumDuplicateKeys[Elem](nonZeroDistr: Seq[(Elem, Double)]): Seq[(Elem, Double)] = { SeqTools.sumSeq(nonZeroDistr) } def mix[Elem](elemUniverse: IndexedSeq[Elem], weightedDistrs: Distribution[Distribution[Elem]] ): Distribution[Elem] = { val normWeightedDistrs = weightedDistrs.normalize val mixedDistrProbs = for (elem <- elemUniverse) yield { val mixedProb = normWeightedDistrs.distr.map { case (distr, weight) => { val y = weight distr.prob(elem) * y } }.sum elem -> mixedProb } Distribution(mixedDistrProbs) } def createDistribution[Elem](nonZero: Seq[(Elem, Double)], universe: Seq[Elem], zeroCountProb: Double = 0.0, isLog: Boolean = false ): Distribution[Elem] = { val unseenKeys = universe diff nonZero.map(_._1) Distribution[Elem](nonZero, universe.size - nonZero.size, zeroEntryNames = unseenKeys, zeroCountProb = zeroCountProb, isLog = isLog) } def expectation2count[Elem](mass: Double): Int = { val floormass = mass.floor.toInt val add = { if (math.random < (mass - floormass)) 1 else 0 } (floormass + add) } def posterior[E,T](data : Iterable[(E, Distribution[T])], prior:(E) => Double = (x:E) => 1.0):Map[T, Distribution[E]] = { val universe = data.map(_._1).toSeq val regrouped = for((entity, distr) <- data; (term,prob) <- distr.nonzeroDistr) yield term -> 
(entity -> prob * prior(entity)) for( (term, seq) <- SeqTools.groupByKey(regrouped)) yield { term -> Distribution.createDistribution(seq.toSeq, universe = universe).normalize } } // def mixExpectedCounts[Elem](expectedLength:Double, weightedDistrSeqs:Seq[(Seq[Distribution[Elem]],Double)]):Map[Elem,Int] = { // weightedDistrSeqsIndexed = weightedDistrSeqs.toIndexedSeq // val outerSeq: Seq[(Int, Nothing)] = for (((distr, weighted), idx) <- weightedDistrSeqs.zipWithIndex) yield idx -> weight // val outerDistr = Distribution[Int](outerSeq, isLog = true).normalize // // // // } }
laura-dietz/strepsi-tools
src/main/scala/edu/umass/ciir/strepsi/distribution/Distribution.scala
Scala
apache-2.0
8,709
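A REPL-style sketch of the Distribution API defined above: build a categorical distribution, normalize it, query probabilities, and take a weighted draw (the element names and weights are arbitrary):

import edu.umass.ciir.strepsi.distribution.Distribution

val d = Distribution(Seq("a" -> 2.0, "b" -> 1.0, "c" -> 1.0)).normalize

d.prob("a")                                           // 0.5 after normalization
d.getArgMax                                           // ("a", 0.5)
val elem   = d.draw(math.random * d.marginal)         // weighted random draw
val counts = d.expectedCounts(expectedLength = 10.0)  // stochastic rounding of expected counts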
package com.seanshubin.utility.exec import scala.concurrent.{ExecutionContext, Future} class ExecutionContextFutureRunner(implicit executionContext: ExecutionContext) extends FutureRunner { override def runInFuture[T](block: => T): Future[T] = { Future { block } } }
SeanShubin/utility
exec/src/main/scala/com/seanshubin/utility/exec/ExecutionContextFutureRunner.scala
Scala
unlicense
287
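A minimal REPL-style usage sketch of the runner above; FutureRunner is assumed to be the single-method trait this class implements:

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Await
import scala.concurrent.duration._
import com.seanshubin.utility.exec.ExecutionContextFutureRunner

val runner = new ExecutionContextFutureRunner   // picks up the implicit execution context
val four   = Await.result(runner.runInFuture(2 + 2), 5.seconds)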
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.internal import java.util.{Locale, NoSuchElementException, Properties, TimeZone} import java.util import java.util.concurrent.TimeUnit import java.util.concurrent.atomic.AtomicReference import java.util.zip.Deflater import scala.collection.JavaConverters._ import scala.collection.immutable import scala.util.Try import scala.util.control.NonFatal import scala.util.matching.Regex import org.apache.hadoop.fs.Path import org.apache.spark.{SparkConf, SparkContext, TaskContext} import org.apache.spark.internal.Logging import org.apache.spark.internal.config._ import org.apache.spark.internal.config.{IGNORE_MISSING_FILES => SPARK_IGNORE_MISSING_FILES} import org.apache.spark.network.util.ByteUnit import org.apache.spark.sql.catalyst.ScalaReflection import org.apache.spark.sql.catalyst.analysis.{HintErrorLogger, Resolver} import org.apache.spark.sql.catalyst.expressions.CodegenObjectFactoryMode import org.apache.spark.sql.catalyst.expressions.codegen.CodeGenerator import org.apache.spark.sql.catalyst.plans.logical.HintErrorHandler import org.apache.spark.sql.catalyst.util.DateTimeUtils import org.apache.spark.sql.connector.catalog.CatalogManager.SESSION_CATALOG_NAME import org.apache.spark.sql.errors.{QueryCompilationErrors, QueryExecutionErrors} import org.apache.spark.sql.types.{AtomicType, TimestampNTZType, TimestampType} import org.apache.spark.unsafe.array.ByteArrayMethods import org.apache.spark.util.Utils //////////////////////////////////////////////////////////////////////////////////////////////////// // This file defines the configuration options for Spark SQL. //////////////////////////////////////////////////////////////////////////////////////////////////// object SQLConf { private[this] val sqlConfEntriesUpdateLock = new Object @volatile private[this] var sqlConfEntries: util.Map[String, ConfigEntry[_]] = util.Collections.emptyMap() private[this] val staticConfKeysUpdateLock = new Object @volatile private[this] var staticConfKeys: java.util.Set[String] = util.Collections.emptySet() private def register(entry: ConfigEntry[_]): Unit = sqlConfEntriesUpdateLock.synchronized { require(!sqlConfEntries.containsKey(entry.key), s"Duplicate SQLConfigEntry. 
${entry.key} has been registered") val updatedMap = new java.util.HashMap[String, ConfigEntry[_]](sqlConfEntries) updatedMap.put(entry.key, entry) sqlConfEntries = updatedMap } // For testing only private[sql] def unregister(entry: ConfigEntry[_]): Unit = sqlConfEntriesUpdateLock.synchronized { val updatedMap = new java.util.HashMap[String, ConfigEntry[_]](sqlConfEntries) updatedMap.remove(entry.key) sqlConfEntries = updatedMap } private[internal] def getConfigEntry(key: String): ConfigEntry[_] = { sqlConfEntries.get(key) } private[internal] def getConfigEntries(): util.Collection[ConfigEntry[_]] = { sqlConfEntries.values() } private[internal] def containsConfigEntry(entry: ConfigEntry[_]): Boolean = { getConfigEntry(entry.key) == entry } private[sql] def containsConfigKey(key: String): Boolean = { sqlConfEntries.containsKey(key) } def registerStaticConfigKey(key: String): Unit = staticConfKeysUpdateLock.synchronized { val updated = new util.HashSet[String](staticConfKeys) updated.add(key) staticConfKeys = updated } def isStaticConfigKey(key: String): Boolean = staticConfKeys.contains(key) def buildConf(key: String): ConfigBuilder = ConfigBuilder(key).onCreate(register) def buildStaticConf(key: String): ConfigBuilder = { ConfigBuilder(key).onCreate { entry => SQLConf.registerStaticConfigKey(entry.key) SQLConf.register(entry) } } /** * Merge all non-static configs to the SQLConf. For example, when the 1st [[SparkSession]] and * the global [[SharedState]] have been initialized, all static configs have taken affect and * should not be set to other values. Other later created sessions should respect all static * configs and only be able to change non-static configs. */ private[sql] def mergeNonStaticSQLConfigs( sqlConf: SQLConf, configs: Map[String, String]): Unit = { for ((k, v) <- configs if !staticConfKeys.contains(k)) { sqlConf.setConfString(k, v) } } /** * Extract entries from `SparkConf` and put them in the `SQLConf` */ private[sql] def mergeSparkConf(sqlConf: SQLConf, sparkConf: SparkConf): Unit = { sparkConf.getAll.foreach { case (k, v) => sqlConf.setConfString(k, v) } } /** * Default config. Only used when there is no active SparkSession for the thread. * See [[get]] for more information. */ private lazy val fallbackConf = new ThreadLocal[SQLConf] { override def initialValue: SQLConf = new SQLConf } /** See [[get]] for more information. */ def getFallbackConf: SQLConf = fallbackConf.get() private lazy val existingConf = new ThreadLocal[SQLConf] { override def initialValue: SQLConf = null } def withExistingConf[T](conf: SQLConf)(f: => T): T = { val old = existingConf.get() existingConf.set(conf) try { f } finally { if (old != null) { existingConf.set(old) } else { existingConf.remove() } } } /** * Defines a getter that returns the SQLConf within scope. * See [[get]] for more information. */ private val confGetter = new AtomicReference[() => SQLConf](() => fallbackConf.get()) /** * Sets the active config object within the current scope. * See [[get]] for more information. */ def setSQLConfGetter(getter: () => SQLConf): Unit = { confGetter.set(getter) } /** * Returns the active config object within the current scope. If there is an active SparkSession, * the proper SQLConf associated with the thread's active session is used. 
If it's called from * tasks in the executor side, a SQLConf will be created from job local properties, which are set * and propagated from the driver side, unless a `SQLConf` has been set in the scope by * `withExistingConf` as done for propagating SQLConf for operations performed on RDDs created * from DataFrames. * * The way this works is a little bit convoluted, due to the fact that config was added initially * only for physical plans (and as a result not in sql/catalyst module). * * The first time a SparkSession is instantiated, we set the [[confGetter]] to return the * active SparkSession's config. If there is no active SparkSession, it returns using the thread * local [[fallbackConf]]. The reason [[fallbackConf]] is a thread local (rather than just a conf) * is to support setting different config options for different threads so we can potentially * run tests in parallel. At the time this feature was implemented, this was a no-op since we * run unit tests (that does not involve SparkSession) in serial order. */ def get: SQLConf = { if (Utils.isInRunningSparkTask) { val conf = existingConf.get() if (conf != null) { conf } else { new ReadOnlySQLConf(TaskContext.get()) } } else { val isSchedulerEventLoopThread = SparkContext.getActive .flatMap { sc => Option(sc.dagScheduler) } .map(_.eventProcessLoop.eventThread) .exists(_.getId == Thread.currentThread().getId) if (isSchedulerEventLoopThread) { // DAGScheduler event loop thread does not have an active SparkSession, the `confGetter` // will return `fallbackConf` which is unexpected. Here we require the caller to get the // conf within `withExistingConf`, otherwise fail the query. val conf = existingConf.get() if (conf != null) { conf } else if (Utils.isTesting) { throw QueryExecutionErrors.cannotGetSQLConfInSchedulerEventLoopThreadError() } else { confGetter.get()() } } else { val conf = existingConf.get() if (conf != null) { conf } else { confGetter.get()() } } } } val ANALYZER_MAX_ITERATIONS = buildConf("spark.sql.analyzer.maxIterations") .internal() .doc("The max number of iterations the analyzer runs.") .version("3.0.0") .intConf .createWithDefault(100) val OPTIMIZER_EXCLUDED_RULES = buildConf("spark.sql.optimizer.excludedRules") .doc("Configures a list of rules to be disabled in the optimizer, in which the rules are " + "specified by their rule names and separated by comma. It is not guaranteed that all the " + "rules in this configuration will eventually be excluded, as some rules are necessary " + "for correctness. The optimizer will log the rules that have indeed been excluded.") .version("2.4.0") .stringConf .createOptional val OPTIMIZER_MAX_ITERATIONS = buildConf("spark.sql.optimizer.maxIterations") .internal() .doc("The max number of iterations the optimizer runs.") .version("2.0.0") .intConf .createWithDefault(100) val OPTIMIZER_INSET_CONVERSION_THRESHOLD = buildConf("spark.sql.optimizer.inSetConversionThreshold") .internal() .doc("The threshold of set size for InSet conversion.") .version("2.0.0") .intConf .createWithDefault(10) val OPTIMIZER_INSET_SWITCH_THRESHOLD = buildConf("spark.sql.optimizer.inSetSwitchThreshold") .internal() .doc("Configures the max set size in InSet for which Spark will generate code with " + "switch statements. 
This is applicable only to bytes, shorts, ints, dates.") .version("3.0.0") .intConf .checkValue(threshold => threshold >= 0 && threshold <= 600, "The max set size " + "for using switch statements in InSet must be non-negative and less than or equal to 600") .createWithDefault(400) val PLAN_CHANGE_LOG_LEVEL = buildConf("spark.sql.planChangeLog.level") .internal() .doc("Configures the log level for logging the change from the original plan to the new " + "plan after a rule or batch is applied. The value can be 'trace', 'debug', 'info', " + "'warn', or 'error'. The default log level is 'trace'.") .version("3.1.0") .stringConf .transform(_.toUpperCase(Locale.ROOT)) .checkValue(logLevel => Set("TRACE", "DEBUG", "INFO", "WARN", "ERROR").contains(logLevel), "Invalid value for 'spark.sql.planChangeLog.level'. Valid values are " + "'trace', 'debug', 'info', 'warn' and 'error'.") .createWithDefault("trace") val PLAN_CHANGE_LOG_RULES = buildConf("spark.sql.planChangeLog.rules") .internal() .doc("Configures a list of rules for logging plan changes, in which the rules are " + "specified by their rule names and separated by comma.") .version("3.1.0") .stringConf .createOptional val PLAN_CHANGE_LOG_BATCHES = buildConf("spark.sql.planChangeLog.batches") .internal() .doc("Configures a list of batches for logging plan changes, in which the batches " + "are specified by their batch names and separated by comma.") .version("3.1.0") .stringConf .createOptional val DYNAMIC_PARTITION_PRUNING_ENABLED = buildConf("spark.sql.optimizer.dynamicPartitionPruning.enabled") .doc("When true, we will generate predicate for partition column when it's used as join key") .version("3.0.0") .booleanConf .createWithDefault(true) val DYNAMIC_PARTITION_PRUNING_USE_STATS = buildConf("spark.sql.optimizer.dynamicPartitionPruning.useStats") .internal() .doc("When true, distinct count statistics will be used for computing the data size of the " + "partitioned table after dynamic partition pruning, in order to evaluate if it is worth " + "adding an extra subquery as the pruning filter if broadcast reuse is not applicable.") .version("3.0.0") .booleanConf .createWithDefault(true) val DYNAMIC_PARTITION_PRUNING_FALLBACK_FILTER_RATIO = buildConf("spark.sql.optimizer.dynamicPartitionPruning.fallbackFilterRatio") .internal() .doc("When statistics are not available or configured not to be used, this config will be " + "used as the fallback filter ratio for computing the data size of the partitioned table " + "after dynamic partition pruning, in order to evaluate if it is worth adding an extra " + "subquery as the pruning filter if broadcast reuse is not applicable.") .version("3.0.0") .doubleConf .createWithDefault(0.5) val DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY = buildConf("spark.sql.optimizer.dynamicPartitionPruning.reuseBroadcastOnly") .internal() .doc("When true, dynamic partition pruning will only apply when the broadcast exchange of " + "a broadcast hash join operation can be reused as the dynamic pruning filter.") .version("3.0.0") .booleanConf .createWithDefault(true) val COMPRESS_CACHED = buildConf("spark.sql.inMemoryColumnarStorage.compressed") .doc("When set to true Spark SQL will automatically select a compression codec for each " + "column based on statistics of the data.") .version("1.0.1") .booleanConf .createWithDefault(true) val COLUMN_BATCH_SIZE = buildConf("spark.sql.inMemoryColumnarStorage.batchSize") .doc("Controls the size of batches for columnar caching. 
Larger batch sizes can improve " + "memory utilization and compression, but risk OOMs when caching data.") .version("1.1.1") .intConf .createWithDefault(10000) val IN_MEMORY_PARTITION_PRUNING = buildConf("spark.sql.inMemoryColumnarStorage.partitionPruning") .internal() .doc("When true, enable partition pruning for in-memory columnar tables.") .version("1.2.0") .booleanConf .createWithDefault(true) val IN_MEMORY_TABLE_SCAN_STATISTICS_ENABLED = buildConf("spark.sql.inMemoryTableScanStatistics.enable") .internal() .doc("When true, enable in-memory table scan accumulators.") .version("3.0.0") .booleanConf .createWithDefault(false) val CACHE_VECTORIZED_READER_ENABLED = buildConf("spark.sql.inMemoryColumnarStorage.enableVectorizedReader") .doc("Enables vectorized reader for columnar caching.") .version("2.3.1") .booleanConf .createWithDefault(true) val COLUMN_VECTOR_OFFHEAP_ENABLED = buildConf("spark.sql.columnVector.offheap.enabled") .internal() .doc("When true, use OffHeapColumnVector in ColumnarBatch.") .version("2.3.0") .booleanConf .createWithDefault(false) val PREFER_SORTMERGEJOIN = buildConf("spark.sql.join.preferSortMergeJoin") .internal() .doc("When true, prefer sort merge join over shuffled hash join. " + "Sort merge join consumes less memory than shuffled hash join and it works efficiently " + "when both join tables are large. On the other hand, shuffled hash join can improve " + "performance (e.g., of full outer joins) when one of join tables is much smaller.") .version("2.0.0") .booleanConf .createWithDefault(true) val REQUIRE_ALL_CLUSTER_KEYS_FOR_CO_PARTITION = buildConf("spark.sql.requireAllClusterKeysForCoPartition") .internal() .doc("When true, the planner requires all the clustering keys as the hash partition keys " + "of the children, to eliminate the shuffles for the operator that needs its children to " + "be co-partitioned, such as JOIN node. This is to avoid data skews which can lead to " + "significant performance regression if shuffles are eliminated.") .version("3.3.0") .booleanConf .createWithDefault(true) val REQUIRE_ALL_CLUSTER_KEYS_FOR_DISTRIBUTION = buildConf("spark.sql.requireAllClusterKeysForDistribution") .internal() .doc("When true, the planner requires all the clustering keys as the partition keys " + "(with same ordering) of the children, to eliminate the shuffle for the operator that " + "requires its children be clustered distributed, such as AGGREGATE and WINDOW node. " + "This is to avoid data skews which can lead to significant performance regression if " + "shuffle is eliminated.") .version("3.3.0") .booleanConf .createWithDefault(false) val RADIX_SORT_ENABLED = buildConf("spark.sql.sort.enableRadixSort") .internal() .doc("When true, enable use of radix sort when possible. Radix sort is much faster but " + "requires additional memory to be reserved up-front. The memory overhead may be " + "significant when sorting very small rows (up to 50% more in this case).") .version("2.0.0") .booleanConf .createWithDefault(true) val AUTO_BROADCASTJOIN_THRESHOLD = buildConf("spark.sql.autoBroadcastJoinThreshold") .doc("Configures the maximum size in bytes for a table that will be broadcast to all worker " + "nodes when performing a join. By setting this value to -1 broadcasting can be disabled. 
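  // --- Editor's illustrative sketch (not part of the original Spark source) ---
  // Typical way the in-memory columnar caching entries above come into play; the table name
  // and batch size are made-up examples, not recommendations:
  //   SET spark.sql.inMemoryColumnarStorage.compressed=true;
  //   SET spark.sql.inMemoryColumnarStorage.batchSize=20000;
  //   CACHE TABLE sales_events;   -- caching a table is what makes these settings take effect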
" + "Note that currently statistics are only supported for Hive Metastore tables where the " + "command `ANALYZE TABLE <tableName> COMPUTE STATISTICS noscan` has been " + "run, and file-based data source tables where the statistics are computed directly on " + "the files of data.") .version("1.1.0") .bytesConf(ByteUnit.BYTE) .createWithDefaultString("10MB") val SHUFFLE_HASH_JOIN_FACTOR = buildConf("spark.sql.shuffledHashJoinFactor") .doc("The shuffle hash join can be selected if the data size of small" + " side multiplied by this factor is still smaller than the large side.") .version("3.3.0") .intConf .checkValue(_ >= 1, "The shuffle hash join factor cannot be negative.") .createWithDefault(3) val LIMIT_SCALE_UP_FACTOR = buildConf("spark.sql.limit.scaleUpFactor") .internal() .doc("Minimal increase rate in number of partitions between attempts when executing a take " + "on a query. Higher values lead to more partitions read. Lower values might lead to " + "longer execution times as more jobs will be run") .version("2.1.1") .intConf .createWithDefault(4) val ADVANCED_PARTITION_PREDICATE_PUSHDOWN = buildConf("spark.sql.hive.advancedPartitionPredicatePushdown.enabled") .internal() .doc("When true, advanced partition predicate pushdown into Hive metastore is enabled.") .version("2.3.0") .booleanConf .createWithDefault(true) val LEAF_NODE_DEFAULT_PARALLELISM = buildConf("spark.sql.leafNodeDefaultParallelism") .doc("The default parallelism of Spark SQL leaf nodes that produce data, such as the file " + "scan node, the local data scan node, the range node, etc. The default value of this " + "config is 'SparkContext#defaultParallelism'.") .version("3.2.0") .intConf .checkValue(_ > 0, "The value of spark.sql.leafNodeDefaultParallelism must be positive.") .createOptional val SHUFFLE_PARTITIONS = buildConf("spark.sql.shuffle.partitions") .doc("The default number of partitions to use when shuffling data for joins or aggregations. " + "Note: For structured streaming, this configuration cannot be changed between query " + "restarts from the same checkpoint location.") .version("1.1.0") .intConf .checkValue(_ > 0, "The value of spark.sql.shuffle.partitions must be positive") .createWithDefault(200) val SHUFFLE_TARGET_POSTSHUFFLE_INPUT_SIZE = buildConf("spark.sql.adaptive.shuffle.targetPostShuffleInputSize") .internal() .doc("(Deprecated since Spark 3.0)") .version("1.6.0") .bytesConf(ByteUnit.BYTE) .checkValue(_ > 0, "advisoryPartitionSizeInBytes must be positive") .createWithDefaultString("64MB") val ADAPTIVE_EXECUTION_ENABLED = buildConf("spark.sql.adaptive.enabled") .doc("When true, enable adaptive query execution, which re-optimizes the query plan in the " + "middle of query execution, based on accurate runtime statistics.") .version("1.6.0") .booleanConf .createWithDefault(true) val ADAPTIVE_EXECUTION_FORCE_APPLY = buildConf("spark.sql.adaptive.forceApply") .internal() .doc("Adaptive query execution is skipped when the query does not have exchanges or " + "sub-queries. By setting this config to true (together with " + s"'${ADAPTIVE_EXECUTION_ENABLED.key}' set to true), Spark will force apply adaptive query " + "execution for all supported queries.") .version("3.0.0") .booleanConf .createWithDefault(false) val ADAPTIVE_EXECUTION_LOG_LEVEL = buildConf("spark.sql.adaptive.logLevel") .internal() .doc("Configures the log level for adaptive execution logging of plan changes. The value " + "can be 'trace', 'debug', 'info', 'warn', or 'error'. 
The default log level is 'debug'.") .version("3.0.0") .stringConf .transform(_.toUpperCase(Locale.ROOT)) .checkValues(Set("TRACE", "DEBUG", "INFO", "WARN", "ERROR")) .createWithDefault("debug") val ADVISORY_PARTITION_SIZE_IN_BYTES = buildConf("spark.sql.adaptive.advisoryPartitionSizeInBytes") .doc("The advisory size in bytes of the shuffle partition during adaptive optimization " + s"(when ${ADAPTIVE_EXECUTION_ENABLED.key} is true). It takes effect when Spark " + "coalesces small shuffle partitions or splits skewed shuffle partition.") .version("3.0.0") .fallbackConf(SHUFFLE_TARGET_POSTSHUFFLE_INPUT_SIZE) val COALESCE_PARTITIONS_ENABLED = buildConf("spark.sql.adaptive.coalescePartitions.enabled") .doc(s"When true and '${ADAPTIVE_EXECUTION_ENABLED.key}' is true, Spark will coalesce " + "contiguous shuffle partitions according to the target size (specified by " + s"'${ADVISORY_PARTITION_SIZE_IN_BYTES.key}'), to avoid too many small tasks.") .version("3.0.0") .booleanConf .createWithDefault(true) val COALESCE_PARTITIONS_PARALLELISM_FIRST = buildConf("spark.sql.adaptive.coalescePartitions.parallelismFirst") .doc("When true, Spark does not respect the target size specified by " + s"'${ADVISORY_PARTITION_SIZE_IN_BYTES.key}' (default 64MB) when coalescing contiguous " + "shuffle partitions, but adaptively calculate the target size according to the default " + "parallelism of the Spark cluster. The calculated size is usually smaller than the " + "configured target size. This is to maximize the parallelism and avoid performance " + "regression when enabling adaptive query execution. It's recommended to set this config " + "to false and respect the configured target size.") .version("3.2.0") .booleanConf .createWithDefault(true) val COALESCE_PARTITIONS_MIN_PARTITION_SIZE = buildConf("spark.sql.adaptive.coalescePartitions.minPartitionSize") .doc("The minimum size of shuffle partitions after coalescing. This is useful when the " + "adaptively calculated target size is too small during partition coalescing.") .version("3.2.0") .bytesConf(ByteUnit.BYTE) .checkValue(_ > 0, "minPartitionSize must be positive") .createWithDefaultString("1MB") val COALESCE_PARTITIONS_MIN_PARTITION_NUM = buildConf("spark.sql.adaptive.coalescePartitions.minPartitionNum") .internal() .doc("(deprecated) The suggested (not guaranteed) minimum number of shuffle partitions " + "after coalescing. If not set, the default value is the default parallelism of the " + "Spark cluster. This configuration only has an effect when " + s"'${ADAPTIVE_EXECUTION_ENABLED.key}' and " + s"'${COALESCE_PARTITIONS_ENABLED.key}' are both true.") .version("3.0.0") .intConf .checkValue(_ > 0, "The minimum number of partitions must be positive.") .createOptional val COALESCE_PARTITIONS_INITIAL_PARTITION_NUM = buildConf("spark.sql.adaptive.coalescePartitions.initialPartitionNum") .doc("The initial number of shuffle partitions before coalescing. If not set, it equals to " + s"${SHUFFLE_PARTITIONS.key}. This configuration only has an effect when " + s"'${ADAPTIVE_EXECUTION_ENABLED.key}' and '${COALESCE_PARTITIONS_ENABLED.key}' " + "are both true.") .version("3.0.0") .intConf .checkValue(_ > 0, "The initial number of partitions must be positive.") .createOptional val FETCH_SHUFFLE_BLOCKS_IN_BATCH = buildConf("spark.sql.adaptive.fetchShuffleBlocksInBatch") .internal() .doc("Whether to fetch the contiguous shuffle blocks in batch. 
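  // --- Editor's illustrative sketch (not part of the original Spark source) ---
  // A common combination of the shuffle/AQE entries above; the values are examples only:
  //   SET spark.sql.shuffle.partitions=400;                              -- static upper bound
  //   SET spark.sql.adaptive.enabled=true;                               -- runtime re-optimization
  //   SET spark.sql.adaptive.coalescePartitions.enabled=true;
  //   SET spark.sql.adaptive.advisoryPartitionSizeInBytes=128m;
  //   SET spark.sql.adaptive.coalescePartitions.parallelismFirst=false;  -- respect the target size
  // The same keys can also be supplied at launch time via spark-submit --conf key=value.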
Instead of fetching blocks " +
        "one by one, fetching contiguous shuffle blocks for the same map task in batch can " +
        "reduce IO and improve performance. Note that multiple contiguous blocks can exist in " +
        s"a single fetch request only when '${ADAPTIVE_EXECUTION_ENABLED.key}' and " +
        s"'${COALESCE_PARTITIONS_ENABLED.key}' are both true. This feature also requires a " +
        "relocatable serializer, a compression codec that supports concatenation of serialized " +
        "streams, the new version of the shuffle fetch protocol, and IO encryption to be " +
        "disabled.")
      .version("3.0.0")
      .booleanConf
      .createWithDefault(true)

  val LOCAL_SHUFFLE_READER_ENABLED =
    buildConf("spark.sql.adaptive.localShuffleReader.enabled")
      .doc(s"When true and '${ADAPTIVE_EXECUTION_ENABLED.key}' is true, Spark tries to use local " +
        "shuffle reader to read the shuffle data when the shuffle partitioning is not needed, " +
        "for example, after converting sort-merge join to broadcast-hash join.")
      .version("3.0.0")
      .booleanConf
      .createWithDefault(true)

  val SKEW_JOIN_ENABLED =
    buildConf("spark.sql.adaptive.skewJoin.enabled")
      .doc(s"When true and '${ADAPTIVE_EXECUTION_ENABLED.key}' is true, Spark dynamically " +
        "handles skew in shuffled join (sort-merge and shuffled hash) by splitting (and " +
        "replicating if needed) skewed partitions.")
      .version("3.0.0")
      .booleanConf
      .createWithDefault(true)

  val SKEW_JOIN_SKEWED_PARTITION_FACTOR =
    buildConf("spark.sql.adaptive.skewJoin.skewedPartitionFactor")
      .doc("A partition is considered as skewed if its size is larger than this factor " +
        "multiplying the median partition size and also larger than " +
        "'spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes'.")
      .version("3.0.0")
      .intConf
      .checkValue(_ >= 0, "The skew factor cannot be negative.")
      .createWithDefault(5)

  val SKEW_JOIN_SKEWED_PARTITION_THRESHOLD =
    buildConf("spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes")
      .doc("A partition is considered as skewed if its size in bytes is larger than this " +
        s"threshold and also larger than '${SKEW_JOIN_SKEWED_PARTITION_FACTOR.key}' " +
        "multiplying the median partition size. Ideally this config should be set larger " +
        s"than '${ADVISORY_PARTITION_SIZE_IN_BYTES.key}'.")
      .version("3.0.0")
      .bytesConf(ByteUnit.BYTE)
      .createWithDefaultString("256MB")

  val NON_EMPTY_PARTITION_RATIO_FOR_BROADCAST_JOIN =
    buildConf("spark.sql.adaptive.nonEmptyPartitionRatioForBroadcastJoin")
      .internal()
      .doc("The relation with a non-empty partition ratio lower than this config will not be " +
        "considered as the build side of a broadcast-hash join in adaptive execution regardless " +
        "of its size. This configuration only has an effect when " +
        s"'${ADAPTIVE_EXECUTION_ENABLED.key}' is true.")
      .version("3.0.0")
      .doubleConf
      .checkValue(_ >= 0, "The non-empty partition ratio must be a positive number.")
      .createWithDefault(0.2)

  val ADAPTIVE_OPTIMIZER_EXCLUDED_RULES =
    buildConf("spark.sql.adaptive.optimizer.excludedRules")
      .doc("Configures a list of rules to be disabled in the adaptive optimizer, in which the " +
        "rules are specified by their rule names and separated by comma. The optimizer will log " +
        "the rules that have indeed been excluded.")
      .version("3.1.0")
      .stringConf
      .createOptional

  val ADAPTIVE_AUTO_BROADCASTJOIN_THRESHOLD =
    buildConf("spark.sql.adaptive.autoBroadcastJoinThreshold")
      .doc("Configures the maximum size in bytes for a table that will be broadcast to all " +
        "worker nodes when performing a join. By setting this value to -1 broadcasting can be " +
        s"disabled. The default value is the same as ${AUTO_BROADCASTJOIN_THRESHOLD.key}. 
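  // --- Editor's illustrative sketch (not part of the original Spark source) ---
  // Per the two docs above, a partition is treated as skewed only when it exceeds BOTH limits:
  //   skewed = size > skewedPartitionFactor * medianPartitionSize &&
  //            size > skewedPartitionThresholdInBytes
  // Example tuning for a workload with very large partitions (values are assumptions):
  //   SET spark.sql.adaptive.skewJoin.skewedPartitionFactor=10;
  //   SET spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes=512m;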
" + "Note that, this config is used only in adaptive framework.") .version("3.2.0") .bytesConf(ByteUnit.BYTE) .createOptional val ADAPTIVE_MAX_SHUFFLE_HASH_JOIN_LOCAL_MAP_THRESHOLD = buildConf("spark.sql.adaptive.maxShuffledHashJoinLocalMapThreshold") .doc("Configures the maximum size in bytes per partition that can be allowed to build " + "local hash map. If this value is not smaller than " + s"${ADVISORY_PARTITION_SIZE_IN_BYTES.key} and all the partition size are not larger " + "than this config, join selection prefer to use shuffled hash join instead of " + s"sort merge join regardless of the value of ${PREFER_SORTMERGEJOIN.key}.") .version("3.2.0") .bytesConf(ByteUnit.BYTE) .createWithDefault(0L) val ADAPTIVE_OPTIMIZE_SKEWS_IN_REBALANCE_PARTITIONS_ENABLED = buildConf("spark.sql.adaptive.optimizeSkewsInRebalancePartitions.enabled") .doc(s"When true and '${ADAPTIVE_EXECUTION_ENABLED.key}' is true, Spark will optimize the " + "skewed shuffle partitions in RebalancePartitions and split them to smaller ones " + s"according to the target size (specified by '${ADVISORY_PARTITION_SIZE_IN_BYTES.key}'), " + "to avoid data skew.") .version("3.2.0") .booleanConf .createWithDefault(true) val ADAPTIVE_REBALANCE_PARTITIONS_SMALL_PARTITION_FACTOR = buildConf("spark.sql.adaptive.rebalancePartitionsSmallPartitionFactor") .doc(s"A partition will be merged during splitting if its size is small than this factor " + s"multiply ${ADVISORY_PARTITION_SIZE_IN_BYTES.key}.") .version("3.3.0") .doubleConf .checkValue(v => v > 0 && v < 1, "the factor must be in (0, 1)") .createWithDefault(0.2) val ADAPTIVE_FORCE_OPTIMIZE_SKEWED_JOIN = buildConf("spark.sql.adaptive.forceOptimizeSkewedJoin") .doc("When true, force enable OptimizeSkewedJoin even if it introduces extra shuffle.") .version("3.3.0") .booleanConf .createWithDefault(false) val ADAPTIVE_CUSTOM_COST_EVALUATOR_CLASS = buildConf("spark.sql.adaptive.customCostEvaluatorClass") .doc("The custom cost evaluator class to be used for adaptive execution. If not being set," + " Spark will use its own SimpleCostEvaluator by default.") .version("3.2.0") .stringConf .createOptional val SUBEXPRESSION_ELIMINATION_ENABLED = buildConf("spark.sql.subexpressionElimination.enabled") .internal() .doc("When true, common subexpressions will be eliminated.") .version("1.6.0") .booleanConf .createWithDefault(true) val SUBEXPRESSION_ELIMINATION_CACHE_MAX_ENTRIES = buildConf("spark.sql.subexpressionElimination.cache.maxEntries") .internal() .doc("The maximum entries of the cache used for interpreted subexpression elimination.") .version("3.1.0") .intConf .checkValue(_ >= 0, "The maximum must not be negative") .createWithDefault(100) val CASE_SENSITIVE = buildConf("spark.sql.caseSensitive") .internal() .doc("Whether the query analyzer should be case sensitive or not. " + "Default to case insensitive. It is highly discouraged to turn on case sensitive mode.") .version("1.4.0") .booleanConf .createWithDefault(false) val CONSTRAINT_PROPAGATION_ENABLED = buildConf("spark.sql.constraintPropagation.enabled") .internal() .doc("When true, the query optimizer will infer and propagate data constraints in the query " + "plan to optimize them. 
Constraint propagation can sometimes be computationally expensive " + "for certain kinds of query plans (such as those with a large number of predicates and " + "aliases) which might negatively impact overall runtime.") .version("2.2.0") .booleanConf .createWithDefault(true) val ESCAPED_STRING_LITERALS = buildConf("spark.sql.parser.escapedStringLiterals") .internal() .doc("When true, string literals (including regex patterns) remain escaped in our SQL " + "parser. The default is false since Spark 2.0. Setting it to true can restore the behavior " + "prior to Spark 2.0.") .version("2.2.1") .booleanConf .createWithDefault(false) val FILE_COMPRESSION_FACTOR = buildConf("spark.sql.sources.fileCompressionFactor") .internal() .doc("When estimating the output data size of a table scan, multiply the file size with this " + "factor as the estimated data size, in case the data is compressed in the file and lead to" + " a heavily underestimated result.") .version("2.3.1") .doubleConf .checkValue(_ > 0, "the value of fileCompressionFactor must be greater than 0") .createWithDefault(1.0) val PARQUET_SCHEMA_MERGING_ENABLED = buildConf("spark.sql.parquet.mergeSchema") .doc("When true, the Parquet data source merges schemas collected from all data files, " + "otherwise the schema is picked from the summary file or a random data file " + "if no summary file is available.") .version("1.5.0") .booleanConf .createWithDefault(false) val PARQUET_SCHEMA_RESPECT_SUMMARIES = buildConf("spark.sql.parquet.respectSummaryFiles") .doc("When true, we make assumption that all part-files of Parquet are consistent with " + "summary files and we will ignore them when merging schema. Otherwise, if this is " + "false, which is the default, we will merge all part-files. This should be considered " + "as expert-only option, and shouldn't be enabled before knowing what it means exactly.") .version("1.5.0") .booleanConf .createWithDefault(false) val PARQUET_BINARY_AS_STRING = buildConf("spark.sql.parquet.binaryAsString") .doc("Some other Parquet-producing systems, in particular Impala and older versions of " + "Spark SQL, do not differentiate between binary data and strings when writing out the " + "Parquet schema. This flag tells Spark SQL to interpret binary data as a string to provide " + "compatibility with these systems.") .version("1.1.1") .booleanConf .createWithDefault(false) val PARQUET_INT96_AS_TIMESTAMP = buildConf("spark.sql.parquet.int96AsTimestamp") .doc("Some Parquet-producing systems, in particular Impala, store Timestamp into INT96. " + "Spark would also store Timestamp as INT96 because we need to avoid precision lost of the " + "nanoseconds field. This flag tells Spark SQL to interpret INT96 data as a timestamp to " + "provide compatibility with these systems.") .version("1.3.0") .booleanConf .createWithDefault(true) val PARQUET_INT96_TIMESTAMP_CONVERSION = buildConf("spark.sql.parquet.int96TimestampConversion") .doc("This controls whether timestamp adjustments should be applied to INT96 data when " + "converting to timestamps, for data written by Impala. This is necessary because Impala " + "stores INT96 data with a different timezone offset than Hive & Spark.") .version("2.3.0") .booleanConf .createWithDefault(false) object ParquetOutputTimestampType extends Enumeration { val INT96, TIMESTAMP_MICROS, TIMESTAMP_MILLIS = Value } val PARQUET_OUTPUT_TIMESTAMP_TYPE = buildConf("spark.sql.parquet.outputTimestampType") .doc("Sets which Parquet timestamp type to use when Spark writes data to Parquet files. 
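  // --- Editor's illustrative sketch (not part of the original Spark source) ---
  // Sketch of the Parquet compatibility flags above for data produced by other engines;
  // whether you need them depends entirely on the writer of the files:
  //   SET spark.sql.parquet.mergeSchema=true;          -- reconcile schemas across data files
  //   SET spark.sql.parquet.binaryAsString=true;       -- legacy writers that stored strings as binary
  //   SET spark.sql.parquet.int96AsTimestamp=true;     -- Impala/Hive-style INT96 timestamps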
" + "INT96 is a non-standard but commonly used timestamp type in Parquet. TIMESTAMP_MICROS " + "is a standard timestamp type in Parquet, which stores number of microseconds from the " + "Unix epoch. TIMESTAMP_MILLIS is also standard, but with millisecond precision, which " + "means Spark has to truncate the microsecond portion of its timestamp value.") .version("2.3.0") .stringConf .transform(_.toUpperCase(Locale.ROOT)) .checkValues(ParquetOutputTimestampType.values.map(_.toString)) .createWithDefault(ParquetOutputTimestampType.INT96.toString) val PARQUET_COMPRESSION = buildConf("spark.sql.parquet.compression.codec") .doc("Sets the compression codec used when writing Parquet files. If either `compression` or " + "`parquet.compression` is specified in the table-specific options/properties, the " + "precedence would be `compression`, `parquet.compression`, " + "`spark.sql.parquet.compression.codec`. Acceptable values include: none, uncompressed, " + "snappy, gzip, lzo, brotli, lz4, zstd.") .version("1.1.1") .stringConf .transform(_.toLowerCase(Locale.ROOT)) .checkValues(Set("none", "uncompressed", "snappy", "gzip", "lzo", "lz4", "brotli", "zstd")) .createWithDefault("snappy") val PARQUET_FILTER_PUSHDOWN_ENABLED = buildConf("spark.sql.parquet.filterPushdown") .doc("Enables Parquet filter push-down optimization when set to true.") .version("1.2.0") .booleanConf .createWithDefault(true) val PARQUET_FILTER_PUSHDOWN_DATE_ENABLED = buildConf("spark.sql.parquet.filterPushdown.date") .doc("If true, enables Parquet filter push-down optimization for Date. " + s"This configuration only has an effect when '${PARQUET_FILTER_PUSHDOWN_ENABLED.key}' is " + "enabled.") .version("2.4.0") .internal() .booleanConf .createWithDefault(true) val PARQUET_FILTER_PUSHDOWN_TIMESTAMP_ENABLED = buildConf("spark.sql.parquet.filterPushdown.timestamp") .doc("If true, enables Parquet filter push-down optimization for Timestamp. " + s"This configuration only has an effect when '${PARQUET_FILTER_PUSHDOWN_ENABLED.key}' is " + "enabled and Timestamp stored as TIMESTAMP_MICROS or TIMESTAMP_MILLIS type.") .version("2.4.0") .internal() .booleanConf .createWithDefault(true) val PARQUET_FILTER_PUSHDOWN_DECIMAL_ENABLED = buildConf("spark.sql.parquet.filterPushdown.decimal") .doc("If true, enables Parquet filter push-down optimization for Decimal. " + s"This configuration only has an effect when '${PARQUET_FILTER_PUSHDOWN_ENABLED.key}' is " + "enabled.") .version("2.4.0") .internal() .booleanConf .createWithDefault(true) val PARQUET_FILTER_PUSHDOWN_STRING_STARTSWITH_ENABLED = buildConf("spark.sql.parquet.filterPushdown.string.startsWith") .doc("If true, enables Parquet filter push-down optimization for string startsWith function. " + s"This configuration only has an effect when '${PARQUET_FILTER_PUSHDOWN_ENABLED.key}' is " + "enabled.") .version("2.4.0") .internal() .booleanConf .createWithDefault(true) val PARQUET_FILTER_PUSHDOWN_INFILTERTHRESHOLD = buildConf("spark.sql.parquet.pushdown.inFilterThreshold") .doc("For IN predicate, Parquet filter will push-down a set of OR clauses if its " + "number of values not exceeds this threshold. Otherwise, Parquet filter will push-down " + "a value greater than or equal to its minimum value and less than or equal to " + "its maximum value. By setting this value to 0 this feature can be disabled. 
" + s"This configuration only has an effect when '${PARQUET_FILTER_PUSHDOWN_ENABLED.key}' is " + "enabled.") .version("2.4.0") .internal() .intConf .checkValue(threshold => threshold >= 0, "The threshold must not be negative.") .createWithDefault(10) val PARQUET_AGGREGATE_PUSHDOWN_ENABLED = buildConf("spark.sql.parquet.aggregatePushdown") .doc("If true, MAX/MIN/COUNT without filter and group by will be pushed" + " down to Parquet for optimization. MAX/MIN/COUNT for complex types and timestamp" + " can't be pushed down") .version("3.3.0") .booleanConf .createWithDefault(false) val PARQUET_WRITE_LEGACY_FORMAT = buildConf("spark.sql.parquet.writeLegacyFormat") .doc("If true, data will be written in a way of Spark 1.4 and earlier. For example, decimal " + "values will be written in Apache Parquet's fixed-length byte array format, which other " + "systems such as Apache Hive and Apache Impala use. If false, the newer format in Parquet " + "will be used. For example, decimals will be written in int-based format. If Parquet " + "output is intended for use with systems that do not support this newer format, set to true.") .version("1.6.0") .booleanConf .createWithDefault(false) val PARQUET_OUTPUT_COMMITTER_CLASS = buildConf("spark.sql.parquet.output.committer.class") .doc("The output committer class used by Parquet. The specified class needs to be a " + "subclass of org.apache.hadoop.mapreduce.OutputCommitter. Typically, it's also a subclass " + "of org.apache.parquet.hadoop.ParquetOutputCommitter. If it is not, then metadata " + "summaries will never be created, irrespective of the value of " + "parquet.summary.metadata.level") .version("1.5.0") .internal() .stringConf .createWithDefault("org.apache.parquet.hadoop.ParquetOutputCommitter") val PARQUET_VECTORIZED_READER_ENABLED = buildConf("spark.sql.parquet.enableVectorizedReader") .doc("Enables vectorized parquet decoding.") .version("2.0.0") .booleanConf .createWithDefault(true) val PARQUET_RECORD_FILTER_ENABLED = buildConf("spark.sql.parquet.recordLevelFilter.enabled") .doc("If true, enables Parquet's native record-level filtering using the pushed down " + "filters. " + s"This configuration only has an effect when '${PARQUET_FILTER_PUSHDOWN_ENABLED.key}' " + "is enabled and the vectorized reader is not used. You can ensure the vectorized reader " + s"is not used by setting '${PARQUET_VECTORIZED_READER_ENABLED.key}' to false.") .version("2.3.0") .booleanConf .createWithDefault(false) val PARQUET_VECTORIZED_READER_BATCH_SIZE = buildConf("spark.sql.parquet.columnarReaderBatchSize") .doc("The number of rows to include in a parquet vectorized reader batch. The number should " + "be carefully chosen to minimize overhead and avoid OOMs in reading data.") .version("2.4.0") .intConf .createWithDefault(4096) val PARQUET_FIELD_ID_WRITE_ENABLED = buildConf("spark.sql.parquet.fieldId.write.enabled") .doc("Field ID is a native field of the Parquet schema spec. When enabled, " + "Parquet writers will populate the field Id " + "metadata (if present) in the Spark schema to the Parquet schema.") .version("3.3.0") .booleanConf .createWithDefault(true) val PARQUET_FIELD_ID_READ_ENABLED = buildConf("spark.sql.parquet.fieldId.read.enabled") .doc("Field ID is a native field of the Parquet schema spec. 
When enabled, Parquet readers " + "will use field IDs (if present) in the requested Spark schema to look up Parquet " + "fields instead of using column names") .version("3.3.0") .booleanConf .createWithDefault(false) val IGNORE_MISSING_PARQUET_FIELD_ID = buildConf("spark.sql.parquet.fieldId.read.ignoreMissing") .doc("When the Parquet file doesn't have any field IDs but the " + "Spark read schema is using field IDs to read, we will silently return nulls " + "when this flag is enabled, or error otherwise.") .version("3.3.0") .booleanConf .createWithDefault(false) val ORC_COMPRESSION = buildConf("spark.sql.orc.compression.codec") .doc("Sets the compression codec used when writing ORC files. If either `compression` or " + "`orc.compress` is specified in the table-specific options/properties, the precedence " + "would be `compression`, `orc.compress`, `spark.sql.orc.compression.codec`." + "Acceptable values include: none, uncompressed, snappy, zlib, lzo, zstd, lz4.") .version("2.3.0") .stringConf .transform(_.toLowerCase(Locale.ROOT)) .checkValues(Set("none", "uncompressed", "snappy", "zlib", "lzo", "zstd", "lz4")) .createWithDefault("snappy") val ORC_IMPLEMENTATION = buildConf("spark.sql.orc.impl") .doc("When native, use the native version of ORC support instead of the ORC library in Hive. " + "It is 'hive' by default prior to Spark 2.4.") .version("2.3.0") .internal() .stringConf .checkValues(Set("hive", "native")) .createWithDefault("native") val ORC_VECTORIZED_READER_ENABLED = buildConf("spark.sql.orc.enableVectorizedReader") .doc("Enables vectorized orc decoding.") .version("2.3.0") .booleanConf .createWithDefault(true) val ORC_VECTORIZED_READER_BATCH_SIZE = buildConf("spark.sql.orc.columnarReaderBatchSize") .doc("The number of rows to include in a orc vectorized reader batch. The number should " + "be carefully chosen to minimize overhead and avoid OOMs in reading data.") .version("2.4.0") .intConf .createWithDefault(4096) val ORC_VECTORIZED_READER_NESTED_COLUMN_ENABLED = buildConf("spark.sql.orc.enableNestedColumnVectorizedReader") .doc("Enables vectorized orc decoding for nested column.") .version("3.2.0") .booleanConf .createWithDefault(false) val ORC_FILTER_PUSHDOWN_ENABLED = buildConf("spark.sql.orc.filterPushdown") .doc("When true, enable filter pushdown for ORC files.") .version("1.4.0") .booleanConf .createWithDefault(true) val ORC_AGGREGATE_PUSHDOWN_ENABLED = buildConf("spark.sql.orc.aggregatePushdown") .doc("If true, aggregates will be pushed down to ORC for optimization. Support MIN, MAX and " + "COUNT as aggregate expression. For MIN/MAX, support boolean, integer, float and date " + "type. For COUNT, support all data types.") .version("3.3.0") .booleanConf .createWithDefault(false) val ORC_SCHEMA_MERGING_ENABLED = buildConf("spark.sql.orc.mergeSchema") .doc("When true, the Orc data source merges schemas collected from all data files, " + "otherwise the schema is picked from a random data file.") .version("3.0.0") .booleanConf .createWithDefault(false) val HIVE_VERIFY_PARTITION_PATH = buildConf("spark.sql.hive.verifyPartitionPath") .doc("When true, check all the partition paths under the table\\'s root directory " + "when reading data stored in HDFS. 
This configuration will be deprecated in the future " + s"releases and replaced by ${SPARK_IGNORE_MISSING_FILES.key}.") .version("1.4.0") .booleanConf .createWithDefault(false) val HIVE_METASTORE_PARTITION_PRUNING = buildConf("spark.sql.hive.metastorePartitionPruning") .doc("When true, some predicates will be pushed down into the Hive metastore so that " + "unmatching partitions can be eliminated earlier.") .version("1.5.0") .booleanConf .createWithDefault(true) val HIVE_METASTORE_PARTITION_PRUNING_INSET_THRESHOLD = buildConf("spark.sql.hive.metastorePartitionPruningInSetThreshold") .doc("The threshold of set size for InSet predicate when pruning partitions through Hive " + "Metastore. When the set size exceeds the threshold, we rewrite the InSet predicate " + "to be greater than or equal to the minimum value in set and less than or equal to the " + "maximum value in set. Larger values may cause Hive Metastore stack overflow. But for " + "InSet inside Not with values exceeding the threshold, we won't push it to Hive Metastore." ) .version("3.1.0") .internal() .intConf .checkValue(_ > 0, "The value of metastorePartitionPruningInSetThreshold must be positive") .createWithDefault(1000) val HIVE_METASTORE_PARTITION_PRUNING_FALLBACK_ON_EXCEPTION = buildConf("spark.sql.hive.metastorePartitionPruningFallbackOnException") .doc("Whether to fallback to get all partitions from Hive metastore and perform partition " + "pruning on Spark client side, when encountering MetaException from the metastore. Note " + "that Spark query performance may degrade if this is enabled and there are many " + "partitions to be listed. If this is disabled, Spark will fail the query instead.") .version("3.3.0") .booleanConf .createWithDefault(false) val HIVE_METASTORE_PARTITION_PRUNING_FAST_FALLBACK = buildConf("spark.sql.hive.metastorePartitionPruningFastFallback") .doc("When this config is enabled, if the predicates are not supported by Hive or Spark " + "does fallback due to encountering MetaException from the metastore, " + "Spark will instead prune partitions by getting the partition names first " + "and then evaluating the filter expressions on the client side. " + "Note that the predicates with TimeZoneAwareExpression is not supported.") .version("3.3.0") .booleanConf .createWithDefault(false) val HIVE_MANAGE_FILESOURCE_PARTITIONS = buildConf("spark.sql.hive.manageFilesourcePartitions") .doc("When true, enable metastore partition management for file source tables as well. " + "This includes both datasource and converted Hive tables. When partition management " + "is enabled, datasource tables store partition in the Hive metastore, and use the " + s"metastore to prune partitions during query planning when " + s"${HIVE_METASTORE_PARTITION_PRUNING.key} is set to true.") .version("2.1.1") .booleanConf .createWithDefault(true) val HIVE_FILESOURCE_PARTITION_FILE_CACHE_SIZE = buildConf("spark.sql.hive.filesourcePartitionFileCacheSize") .doc("When nonzero, enable caching of partition file metadata in memory. All tables share " + "a cache that can use up to specified num bytes for file metadata. 
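  // --- Editor's illustrative sketch (not part of the original Spark source) ---
  // How the partition-pruning entries above are commonly combined; values are examples:
  //   SET spark.sql.hive.manageFilesourcePartitions=true;             -- track partitions in the metastore
  //   SET spark.sql.hive.metastorePartitionPruning=true;              -- push predicates to the metastore
  //   SET spark.sql.hive.metastorePartitionPruningFastFallback=true;  -- prune on the client when the metastore cannot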
This conf only " + "has an effect when hive filesource partition management is enabled.") .version("2.1.1") .longConf .createWithDefault(250 * 1024 * 1024) object HiveCaseSensitiveInferenceMode extends Enumeration { val INFER_AND_SAVE, INFER_ONLY, NEVER_INFER = Value } val HIVE_CASE_SENSITIVE_INFERENCE = buildConf("spark.sql.hive.caseSensitiveInferenceMode") .internal() .doc("Sets the action to take when a case-sensitive schema cannot be read from a Hive Serde " + "table's properties when reading the table with Spark native data sources. Valid options " + "include INFER_AND_SAVE (infer the case-sensitive schema from the underlying data files " + "and write it back to the table properties), INFER_ONLY (infer the schema but don't " + "attempt to write it to the table properties) and NEVER_INFER (the default mode-- fallback " + "to using the case-insensitive metastore schema instead of inferring).") .version("2.1.1") .stringConf .transform(_.toUpperCase(Locale.ROOT)) .checkValues(HiveCaseSensitiveInferenceMode.values.map(_.toString)) .createWithDefault(HiveCaseSensitiveInferenceMode.NEVER_INFER.toString) val HIVE_TABLE_PROPERTY_LENGTH_THRESHOLD = buildConf("spark.sql.hive.tablePropertyLengthThreshold") .internal() .doc("The maximum length allowed in a single cell when storing Spark-specific information " + "in Hive's metastore as table properties. Currently it covers 2 things: the schema's " + "JSON string, the histogram of column statistics.") .version("3.2.0") .intConf .createOptional val OPTIMIZER_METADATA_ONLY = buildConf("spark.sql.optimizer.metadataOnly") .internal() .doc("When true, enable the metadata-only query optimization that use the table's metadata " + "to produce the partition columns instead of table scans. It applies when all the columns " + "scanned are partition columns and the query has an aggregate operator that satisfies " + "distinct semantics. By default the optimization is disabled, and deprecated as of Spark " + "3.0 since it may return incorrect results when the files are empty, see also SPARK-26709." + "It will be removed in the future releases. If you must use, use 'SparkSessionExtensions' " + "instead to inject it as a custom rule.") .version("2.1.1") .booleanConf .createWithDefault(false) val COLUMN_NAME_OF_CORRUPT_RECORD = buildConf("spark.sql.columnNameOfCorruptRecord") .doc("The name of internal column for storing raw/un-parsed JSON and CSV records that fail " + "to parse.") .version("1.2.0") .stringConf .createWithDefault("_corrupt_record") val BROADCAST_TIMEOUT = buildConf("spark.sql.broadcastTimeout") .doc("Timeout in seconds for the broadcast wait time in broadcast joins.") .version("1.3.0") .timeConf(TimeUnit.SECONDS) .createWithDefaultString(s"${5 * 60}") // This is only used for the thriftserver val THRIFTSERVER_POOL = buildConf("spark.sql.thriftserver.scheduler.pool") .doc("Set a Fair Scheduler pool for a JDBC client session.") .version("1.1.1") .stringConf .createOptional val THRIFTSERVER_INCREMENTAL_COLLECT = buildConf("spark.sql.thriftServer.incrementalCollect") .internal() .doc("When true, enable incremental collection for execution in Thrift Server.") .version("2.0.3") .booleanConf .createWithDefault(false) val THRIFTSERVER_FORCE_CANCEL = buildConf("spark.sql.thriftServer.interruptOnCancel") .doc("When true, all running tasks will be interrupted if one cancels a query. 
" + "When false, all running tasks will remain until finished.") .version("3.2.0") .booleanConf .createWithDefault(false) val THRIFTSERVER_QUERY_TIMEOUT = buildConf("spark.sql.thriftServer.queryTimeout") .doc("Set a query duration timeout in seconds in Thrift Server. If the timeout is set to " + "a positive value, a running query will be cancelled automatically when the timeout is " + "exceeded, otherwise the query continues to run till completion. If timeout values are " + "set for each statement via `java.sql.Statement.setQueryTimeout` and they are smaller " + "than this configuration value, they take precedence. If you set this timeout and prefer " + "to cancel the queries right away without waiting task to finish, consider enabling " + s"${THRIFTSERVER_FORCE_CANCEL.key} together.") .version("3.1.0") .timeConf(TimeUnit.SECONDS) .createWithDefault(0L) val THRIFTSERVER_UI_STATEMENT_LIMIT = buildConf("spark.sql.thriftserver.ui.retainedStatements") .doc("The number of SQL statements kept in the JDBC/ODBC web UI history.") .version("1.4.0") .intConf .createWithDefault(200) val THRIFTSERVER_UI_SESSION_LIMIT = buildConf("spark.sql.thriftserver.ui.retainedSessions") .doc("The number of SQL client sessions kept in the JDBC/ODBC web UI history.") .version("1.4.0") .intConf .createWithDefault(200) // This is used to set the default data source val DEFAULT_DATA_SOURCE_NAME = buildConf("spark.sql.sources.default") .doc("The default data source to use in input/output.") .version("1.3.0") .stringConf .createWithDefault("parquet") val CONVERT_CTAS = buildConf("spark.sql.hive.convertCTAS") .internal() .doc("When true, a table created by a Hive CTAS statement (no USING clause) " + "without specifying any storage property will be converted to a data source table, " + s"using the data source set by ${DEFAULT_DATA_SOURCE_NAME.key}.") .version("2.0.0") .booleanConf .createWithDefault(false) val GATHER_FASTSTAT = buildConf("spark.sql.hive.gatherFastStats") .internal() .doc("When true, fast stats (number of files and total size of all files) will be gathered" + " in parallel while repairing table partitions to avoid the sequential listing in Hive" + " metastore.") .version("2.0.1") .booleanConf .createWithDefault(true) val PARTITION_COLUMN_TYPE_INFERENCE = buildConf("spark.sql.sources.partitionColumnTypeInference.enabled") .doc("When true, automatically infer the data types for partitioned columns.") .version("1.5.0") .booleanConf .createWithDefault(true) val BUCKETING_ENABLED = buildConf("spark.sql.sources.bucketing.enabled") .doc("When false, we will treat bucketed table as normal table") .version("2.0.0") .booleanConf .createWithDefault(true) val BUCKETING_MAX_BUCKETS = buildConf("spark.sql.sources.bucketing.maxBuckets") .doc("The maximum number of buckets allowed.") .version("2.4.0") .intConf .checkValue(_ > 0, "the value of spark.sql.sources.bucketing.maxBuckets must be greater than 0") .createWithDefault(100000) val AUTO_BUCKETED_SCAN_ENABLED = buildConf("spark.sql.sources.bucketing.autoBucketedScan.enabled") .doc("When true, decide whether to do bucketed scan on input tables based on query plan " + "automatically. Do not use bucketed scan if 1. query does not have operators to utilize " + "bucketing (e.g. join, group-by, etc), or 2. there's an exchange operator between these " + s"operators and table scan. 
Note when '${BUCKETING_ENABLED.key}' is set to " + "false, this configuration does not take any effect.") .version("3.1.0") .booleanConf .createWithDefault(true) val CAN_CHANGE_CACHED_PLAN_OUTPUT_PARTITIONING = buildConf("spark.sql.optimizer.canChangeCachedPlanOutputPartitioning") .internal() .doc("Whether to forcibly enable some optimization rules that can change the output " + "partitioning of a cached query when executing it for caching. If it is set to true, " + "queries may need an extra shuffle to read the cached data. This configuration is " + "disabled by default. Currently, the optimization rules enabled by this configuration " + s"are ${ADAPTIVE_EXECUTION_ENABLED.key} and ${AUTO_BUCKETED_SCAN_ENABLED.key}.") .version("3.2.0") .booleanConf .createWithDefault(false) val CROSS_JOINS_ENABLED = buildConf("spark.sql.crossJoin.enabled") .internal() .doc("When false, we will throw an error if a query contains a cartesian product without " + "explicit CROSS JOIN syntax.") .version("2.0.0") .booleanConf .createWithDefault(true) val ORDER_BY_ORDINAL = buildConf("spark.sql.orderByOrdinal") .doc("When true, the ordinal numbers are treated as the position in the select list. " + "When false, the ordinal numbers in order/sort by clause are ignored.") .version("2.0.0") .booleanConf .createWithDefault(true) val GROUP_BY_ORDINAL = buildConf("spark.sql.groupByOrdinal") .doc("When true, the ordinal numbers in group by clauses are treated as the position " + "in the select list. When false, the ordinal numbers are ignored.") .version("2.0.0") .booleanConf .createWithDefault(true) val GROUP_BY_ALIASES = buildConf("spark.sql.groupByAliases") .doc("When true, aliases in a select list can be used in group by clauses. When false, " + "an analysis exception is thrown in the case.") .version("2.2.0") .booleanConf .createWithDefault(true) // The output committer class used by data sources. The specified class needs to be a // subclass of org.apache.hadoop.mapreduce.OutputCommitter. val OUTPUT_COMMITTER_CLASS = buildConf("spark.sql.sources.outputCommitterClass") .version("1.4.0") .internal() .stringConf .createOptional val FILE_COMMIT_PROTOCOL_CLASS = buildConf("spark.sql.sources.commitProtocolClass") .version("2.1.1") .internal() .stringConf .createWithDefault( "org.apache.spark.sql.execution.datasources.SQLHadoopMapReduceCommitProtocol") val PARALLEL_PARTITION_DISCOVERY_THRESHOLD = buildConf("spark.sql.sources.parallelPartitionDiscovery.threshold") .doc("The maximum number of paths allowed for listing files at driver side. If the number " + "of detected paths exceeds this value during partition discovery, it tries to list the " + "files with another Spark distributed job. This configuration is effective only when " + "using file-based sources such as Parquet, JSON and ORC.") .version("1.5.0") .intConf .checkValue(parallel => parallel >= 0, "The maximum number of paths allowed for listing " + "files at driver side must not be negative") .createWithDefault(32) val PARALLEL_PARTITION_DISCOVERY_PARALLELISM = buildConf("spark.sql.sources.parallelPartitionDiscovery.parallelism") .doc("The number of parallelism to list a collection of path recursively, Set the " + "number to prevent file listing from generating too many tasks.") .version("2.1.1") .internal() .intConf .createWithDefault(10000) val IGNORE_DATA_LOCALITY = buildConf("spark.sql.sources.ignoreDataLocality") .doc("If true, Spark will not fetch the block locations for each file on " + "listing files. 
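  // --- Editor's illustrative sketch (not part of the original Spark source) ---
  // What the ordinal/alias entries above allow in practice (table and column names are made up):
  //   SELECT dept, count(1) AS cnt FROM employees GROUP BY 1 ORDER BY 2 DESC;
  // With spark.sql.groupByOrdinal=false the literal 1 is grouped on as a constant instead of
  // being resolved to the first item in the select list.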
This speeds up file listing, but the scheduler cannot " + "schedule tasks to take advantage of data locality. It can be particularly " + "useful if data is read from a remote cluster so the scheduler could never " + "take advantage of locality anyway.") .version("3.0.0") .internal() .booleanConf .createWithDefault(false) // Whether to automatically resolve ambiguity in join conditions for self-joins. // See SPARK-6231. val DATAFRAME_SELF_JOIN_AUTO_RESOLVE_AMBIGUITY = buildConf("spark.sql.selfJoinAutoResolveAmbiguity") .version("1.4.0") .internal() .booleanConf .createWithDefault(true) val FAIL_AMBIGUOUS_SELF_JOIN_ENABLED = buildConf("spark.sql.analyzer.failAmbiguousSelfJoin") .doc("When true, fail the Dataset query if it contains ambiguous self-join.") .version("3.0.0") .internal() .booleanConf .createWithDefault(true) // Whether to retain group by columns or not in GroupedData.agg. val DATAFRAME_RETAIN_GROUP_COLUMNS = buildConf("spark.sql.retainGroupColumns") .version("1.4.0") .internal() .booleanConf .createWithDefault(true) val DATAFRAME_PIVOT_MAX_VALUES = buildConf("spark.sql.pivotMaxValues") .doc("When doing a pivot without specifying values for the pivot column this is the maximum " + "number of (distinct) values that will be collected without error.") .version("1.6.0") .intConf .createWithDefault(10000) val RUN_SQL_ON_FILES = buildConf("spark.sql.runSQLOnFiles") .internal() .doc("When true, we could use `datasource`.`path` as table in SQL query.") .version("1.6.0") .booleanConf .createWithDefault(true) val WHOLESTAGE_CODEGEN_ENABLED = buildConf("spark.sql.codegen.wholeStage") .internal() .doc("When true, the whole stage (of multiple operators) will be compiled into single java" + " method.") .version("2.0.0") .booleanConf .createWithDefault(true) val WHOLESTAGE_CODEGEN_USE_ID_IN_CLASS_NAME = buildConf("spark.sql.codegen.useIdInClassName") .internal() .doc("When true, embed the (whole-stage) codegen stage ID into " + "the class name of the generated class as a suffix") .version("2.3.1") .booleanConf .createWithDefault(true) val WHOLESTAGE_MAX_NUM_FIELDS = buildConf("spark.sql.codegen.maxFields") .internal() .doc("The maximum number of fields (including nested fields) that will be supported before" + " deactivating whole-stage codegen.") .version("2.0.0") .intConf .createWithDefault(100) val CODEGEN_FACTORY_MODE = buildConf("spark.sql.codegen.factoryMode") .doc("This config determines the fallback behavior of several codegen generators " + "during tests. `FALLBACK` means trying codegen first and then falling back to " + "interpreted if any compile error happens. Disabling fallback if `CODEGEN_ONLY`. " + "`NO_CODEGEN` skips codegen and goes interpreted path always. Note that " + "this config works only for tests.") .version("2.4.0") .internal() .stringConf .checkValues(CodegenObjectFactoryMode.values.map(_.toString)) .createWithDefault(CodegenObjectFactoryMode.FALLBACK.toString) val CODEGEN_FALLBACK = buildConf("spark.sql.codegen.fallback") .internal() .doc("When true, (whole stage) codegen could be temporary disabled for the part of query that" + " fail to compile generated code") .version("2.0.0") .booleanConf .createWithDefault(true) val CODEGEN_LOGGING_MAX_LINES = buildConf("spark.sql.codegen.logging.maxLines") .internal() .doc("The maximum number of codegen lines to log when errors occur. 
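  // --- Editor's illustrative sketch (not part of the original Spark source) ---
  // Two of the entries above in action; the path is a made-up example:
  //   SELECT * FROM parquet.`/data/events/2024`;       -- allowed because spark.sql.runSQLOnFiles=true
  //   SET spark.sql.codegen.wholeStage=false;          -- occasionally useful when debugging codegen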
Use -1 for unlimited.") .version("2.3.0") .intConf .checkValue(maxLines => maxLines >= -1, "The maximum must be a positive integer, 0 to " + "disable logging or -1 to apply no limit.") .createWithDefault(1000) val WHOLESTAGE_HUGE_METHOD_LIMIT = buildConf("spark.sql.codegen.hugeMethodLimit") .internal() .doc("The maximum bytecode size of a single compiled Java function generated by whole-stage " + "codegen. When the compiled function exceeds this threshold, the whole-stage codegen is " + "deactivated for this subtree of the current query plan. The default value is 65535, which " + "is the largest bytecode size possible for a valid Java method. When running on HotSpot, " + s"it may be preferable to set the value to ${CodeGenerator.DEFAULT_JVM_HUGE_METHOD_LIMIT} " + "to match HotSpot's implementation.") .version("2.3.0") .intConf .createWithDefault(65535) val CODEGEN_METHOD_SPLIT_THRESHOLD = buildConf("spark.sql.codegen.methodSplitThreshold") .internal() .doc("The threshold of source-code splitting in the codegen. When the number of characters " + "in a single Java function (without comment) exceeds the threshold, the function will be " + "automatically split to multiple smaller ones. We cannot know how many bytecode will be " + "generated, so use the code length as metric. When running on HotSpot, a function's " + "bytecode should not go beyond 8KB, otherwise it will not be JITted; it also should not " + "be too small, otherwise there will be many function calls.") .version("3.0.0") .intConf .checkValue(threshold => threshold > 0, "The threshold must be a positive integer.") .createWithDefault(1024) val WHOLESTAGE_SPLIT_CONSUME_FUNC_BY_OPERATOR = buildConf("spark.sql.codegen.splitConsumeFuncByOperator") .internal() .doc("When true, whole stage codegen would put the logic of consuming rows of each " + "physical operator into individual methods, instead of a single big method. This can be " + "used to avoid oversized function that can miss the opportunity of JIT optimization.") .version("2.3.1") .booleanConf .createWithDefault(true) val FILES_MAX_PARTITION_BYTES = buildConf("spark.sql.files.maxPartitionBytes") .doc("The maximum number of bytes to pack into a single partition when reading files. " + "This configuration is effective only when using file-based sources such as Parquet, JSON " + "and ORC.") .version("2.0.0") .bytesConf(ByteUnit.BYTE) .createWithDefaultString("128MB") // parquet.block.size val FILES_OPEN_COST_IN_BYTES = buildConf("spark.sql.files.openCostInBytes") .internal() .doc("The estimated cost to open a file, measured by the number of bytes could be scanned in" + " the same time. This is used when putting multiple files into a partition. It's better to" + " over estimated, then the partitions with small files will be faster than partitions with" + " bigger files (which is scheduled first). This configuration is effective only when using" + " file-based sources such as Parquet, JSON and ORC.") .version("2.0.0") .bytesConf(ByteUnit.BYTE) .createWithDefaultString("4MB") val FILES_MIN_PARTITION_NUM = buildConf("spark.sql.files.minPartitionNum") .doc("The suggested (not guaranteed) minimum number of split file partitions. " + "If not set, the default value is `spark.default.parallelism`. 
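  // --- Editor's illustrative sketch (not part of the original Spark source) ---
  // Rough shape of how the file-splitting entries above are combined when planning a scan
  // (simplified from FilePartition.maxSplitBytes, not the exact code):
  //   totalBytes    = sum(fileSize + openCostInBytes)
  //   bytesPerCore  = totalBytes / minPartitionNum            -- defaults to default parallelism
  //   maxSplitBytes = min(maxPartitionBytes, max(openCostInBytes, bytesPerCore))
  // so many small files get packed into one partition, each padded by the open cost.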
This configuration is " + "effective only when using file-based sources such as Parquet, JSON and ORC.") .version("3.1.0") .intConf .checkValue(v => v > 0, "The min partition number must be a positive integer.") .createOptional val IGNORE_CORRUPT_FILES = buildConf("spark.sql.files.ignoreCorruptFiles") .doc("Whether to ignore corrupt files. If true, the Spark jobs will continue to run when " + "encountering corrupted files and the contents that have been read will still be returned. " + "This configuration is effective only when using file-based sources such as Parquet, JSON " + "and ORC.") .version("2.1.1") .booleanConf .createWithDefault(false) val IGNORE_MISSING_FILES = buildConf("spark.sql.files.ignoreMissingFiles") .doc("Whether to ignore missing files. If true, the Spark jobs will continue to run when " + "encountering missing files and the contents that have been read will still be returned. " + "This configuration is effective only when using file-based sources such as Parquet, JSON " + "and ORC.") .version("2.3.0") .booleanConf .createWithDefault(false) val MAX_RECORDS_PER_FILE = buildConf("spark.sql.files.maxRecordsPerFile") .doc("Maximum number of records to write out to a single file. " + "If this value is zero or negative, there is no limit.") .version("2.2.0") .longConf .createWithDefault(0) val EXCHANGE_REUSE_ENABLED = buildConf("spark.sql.exchange.reuse") .internal() .doc("When true, the planner will try to find out duplicated exchanges and re-use them.") .version("2.0.0") .booleanConf .createWithDefault(true) val SUBQUERY_REUSE_ENABLED = buildConf("spark.sql.execution.reuseSubquery") .internal() .doc("When true, the planner will try to find out duplicated subqueries and re-use them.") .version("3.0.0") .booleanConf .createWithDefault(true) val REMOVE_REDUNDANT_PROJECTS_ENABLED = buildConf("spark.sql.execution.removeRedundantProjects") .internal() .doc("Whether to remove redundant project exec node based on children's output and " + "ordering requirement.") .version("3.1.0") .booleanConf .createWithDefault(true) val REMOVE_REDUNDANT_SORTS_ENABLED = buildConf("spark.sql.execution.removeRedundantSorts") .internal() .doc("Whether to remove redundant physical sort node") .version("2.4.8") .booleanConf .createWithDefault(true) val REPLACE_HASH_WITH_SORT_AGG_ENABLED = buildConf("spark.sql.execution.replaceHashWithSortAgg") .internal() .doc("Whether to replace hash aggregate node with sort aggregate based on children's ordering") .version("3.3.0") .booleanConf .createWithDefault(false) val STATE_STORE_PROVIDER_CLASS = buildConf("spark.sql.streaming.stateStore.providerClass") .internal() .doc( "The class used to manage state data in stateful streaming queries. This class must " + "be a subclass of StateStoreProvider, and must have a zero-arg constructor. 
" + "Note: For structured streaming, this configuration cannot be changed between query " + "restarts from the same checkpoint location.") .version("2.3.0") .stringConf .createWithDefault( "org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider") val STATE_SCHEMA_CHECK_ENABLED = buildConf("spark.sql.streaming.stateStore.stateSchemaCheck") .doc("When true, Spark will validate the state schema against schema on existing state and " + "fail query if it's incompatible.") .version("3.1.0") .booleanConf .createWithDefault(true) val STATE_STORE_MIN_DELTAS_FOR_SNAPSHOT = buildConf("spark.sql.streaming.stateStore.minDeltasForSnapshot") .internal() .doc("Minimum number of state store delta files that needs to be generated before they " + "consolidated into snapshots.") .version("2.0.0") .intConf .createWithDefault(10) val STATE_STORE_FORMAT_VALIDATION_ENABLED = buildConf("spark.sql.streaming.stateStore.formatValidation.enabled") .internal() .doc("When true, check if the data from state store is valid or not when running streaming " + "queries. This can happen if the state store format has been changed. Note, the feature " + "is only effective in the build-in HDFS state store provider now.") .version("3.1.0") .booleanConf .createWithDefault(true) val FLATMAPGROUPSWITHSTATE_STATE_FORMAT_VERSION = buildConf("spark.sql.streaming.flatMapGroupsWithState.stateFormatVersion") .internal() .doc("State format version used by flatMapGroupsWithState operation in a streaming query") .version("2.4.0") .intConf .checkValue(v => Set(1, 2).contains(v), "Valid versions are 1 and 2") .createWithDefault(2) val CHECKPOINT_LOCATION = buildConf("spark.sql.streaming.checkpointLocation") .doc("The default location for storing checkpoint data for streaming queries.") .version("2.0.0") .stringConf .createOptional val FORCE_DELETE_TEMP_CHECKPOINT_LOCATION = buildConf("spark.sql.streaming.forceDeleteTempCheckpointLocation") .doc("When true, enable temporary checkpoint locations force delete.") .version("3.0.0") .booleanConf .createWithDefault(false) val MIN_BATCHES_TO_RETAIN = buildConf("spark.sql.streaming.minBatchesToRetain") .internal() .doc("The minimum number of batches that must be retained and made recoverable.") .version("2.1.1") .intConf .createWithDefault(100) val MAX_BATCHES_TO_RETAIN_IN_MEMORY = buildConf("spark.sql.streaming.maxBatchesToRetainInMemory") .internal() .doc("The maximum number of batches which will be retained in memory to avoid " + "loading from files. The value adjusts a trade-off between memory usage vs cache miss: " + "'2' covers both success and direct failure cases, '1' covers only success case, " + "and '0' covers extreme case - disable cache to maximize memory size of executors.") .version("2.4.0") .intConf .createWithDefault(2) val STREAMING_MAINTENANCE_INTERVAL = buildConf("spark.sql.streaming.stateStore.maintenanceInterval") .internal() .doc("The interval in milliseconds between triggering maintenance tasks in StateStore. " + "The maintenance task executes background maintenance task in all the loaded store " + "providers if they are still the active instances according to the coordinator. If not, " + "inactive instances of store providers will be closed.") .version("2.0.0") .timeConf(TimeUnit.MILLISECONDS) .createWithDefault(TimeUnit.MINUTES.toMillis(1)) // 1 minute val STATE_STORE_COMPRESSION_CODEC = buildConf("spark.sql.streaming.stateStore.compression.codec") .internal() .doc("The codec used to compress delta and snapshot files generated by StateStore. 
" + "By default, Spark provides four codecs: lz4, lzf, snappy, and zstd. You can also " + "use fully qualified class names to specify the codec. Default codec is lz4.") .version("3.1.0") .stringConf .createWithDefault("lz4") /** * Note: this is defined in `RocksDBConf.FORMAT_VERSION`. These two places should be updated * together. */ val STATE_STORE_ROCKSDB_FORMAT_VERSION = buildConf("spark.sql.streaming.stateStore.rocksdb.formatVersion") .internal() .doc("Set the RocksDB format version. This will be stored in the checkpoint when starting " + "a streaming query. The checkpoint will use this RocksDB format version in the entire " + "lifetime of the query.") .version("3.2.0") .intConf .checkValue(_ >= 0, "Must not be negative") // 5 is the default table format version for RocksDB 6.20.3. .createWithDefault(5) val STREAMING_AGGREGATION_STATE_FORMAT_VERSION = buildConf("spark.sql.streaming.aggregation.stateFormatVersion") .internal() .doc("State format version used by streaming aggregation operations in a streaming query. " + "State between versions are tend to be incompatible, so state format version shouldn't " + "be modified after running.") .version("2.4.0") .intConf .checkValue(v => Set(1, 2).contains(v), "Valid versions are 1 and 2") .createWithDefault(2) val STREAMING_STOP_ACTIVE_RUN_ON_RESTART = buildConf("spark.sql.streaming.stopActiveRunOnRestart") .doc("Running multiple runs of the same streaming query concurrently is not supported. " + "If we find a concurrent active run for a streaming query (in the same or different " + "SparkSessions on the same cluster) and this flag is true, we will stop the old streaming " + "query run to start the new one.") .version("3.0.0") .booleanConf .createWithDefault(true) val STREAMING_JOIN_STATE_FORMAT_VERSION = buildConf("spark.sql.streaming.join.stateFormatVersion") .internal() .doc("State format version used by streaming join operations in a streaming query. " + "State between versions are tend to be incompatible, so state format version shouldn't " + "be modified after running.") .version("3.0.0") .intConf .checkValue(v => Set(1, 2).contains(v), "Valid versions are 1 and 2") .createWithDefault(2) val STREAMING_SESSION_WINDOW_MERGE_SESSIONS_IN_LOCAL_PARTITION = buildConf("spark.sql.streaming.sessionWindow.merge.sessions.in.local.partition") .doc("When true, streaming session window sorts and merge sessions in local partition " + "prior to shuffle. This is to reduce the rows to shuffle, but only beneficial when " + "there're lots of rows in a batch being assigned to same sessions.") .version("3.2.0") .booleanConf .createWithDefault(false) val STREAMING_SESSION_WINDOW_STATE_FORMAT_VERSION = buildConf("spark.sql.streaming.sessionWindow.stateFormatVersion") .internal() .doc("State format version used by streaming session window in a streaming query. 
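  // --- Editor's illustrative sketch (not part of the original Spark source) ---
  // Switching the streaming state store backend; the RocksDB provider class ships with
  // Spark 3.2+ and the checkpoint path is a made-up example:
  //   SET spark.sql.streaming.stateStore.providerClass=org.apache.spark.sql.execution.streaming.state.RocksDBStateStoreProvider;
  //   SET spark.sql.streaming.checkpointLocation=/tmp/sessionization-checkpoint;
  // As its doc above notes, the provider class cannot change across restarts from the same checkpoint.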
" + "State between versions are tend to be incompatible, so state format version shouldn't " + "be modified after running.") .version("3.2.0") .intConf .checkValue(v => Set(1).contains(v), "Valid version is 1") .createWithDefault(1) val UNSUPPORTED_OPERATION_CHECK_ENABLED = buildConf("spark.sql.streaming.unsupportedOperationCheck") .internal() .doc("When true, the logical plan for streaming query will be checked for unsupported" + " operations.") .version("2.0.0") .booleanConf .createWithDefault(true) val USE_DEPRECATED_KAFKA_OFFSET_FETCHING = buildConf("spark.sql.streaming.kafka.useDeprecatedOffsetFetching") .internal() .doc("When true, the deprecated Consumer based offset fetching used which could cause " + "infinite wait in Spark queries. Such cases query restart is the only workaround. " + "For further details please see Offset Fetching chapter of Structured Streaming Kafka " + "Integration Guide.") .version("3.1.0") .booleanConf .createWithDefault(true) val STATEFUL_OPERATOR_CHECK_CORRECTNESS_ENABLED = buildConf("spark.sql.streaming.statefulOperator.checkCorrectness.enabled") .internal() .doc("When true, the stateful operators for streaming query will be checked for possible " + "correctness issue due to global watermark. The correctness issue comes from queries " + "containing stateful operation which can emit rows older than the current watermark " + "plus allowed late record delay, which are \\"late rows\\" in downstream stateful " + "operations and these rows can be discarded. Please refer the programming guide doc for " + "more details. Once the issue is detected, Spark will throw analysis exception. " + "When this config is disabled, Spark will just print warning message for users. " + "Prior to Spark 3.1.0, the behavior is disabling this config.") .version("3.1.0") .booleanConf .createWithDefault(true) val FILESTREAM_SINK_METADATA_IGNORED = buildConf("spark.sql.streaming.fileStreamSink.ignoreMetadata") .internal() .doc("If this is enabled, when Spark reads from the results of a streaming query written " + "by `FileStreamSink`, Spark will ignore the metadata log and treat it as normal path to " + "read, e.g. listing files using HDFS APIs.") .version("3.2.0") .booleanConf .createWithDefault(false) val VARIABLE_SUBSTITUTE_ENABLED = buildConf("spark.sql.variable.substitute") .doc("This enables substitution using syntax like `${var}`, `${system:var}`, " + "and `${env:var}`.") .version("2.0.0") .booleanConf .createWithDefault(true) val ENABLE_TWOLEVEL_AGG_MAP = buildConf("spark.sql.codegen.aggregate.map.twolevel.enabled") .internal() .doc("Enable two-level aggregate hash map. When enabled, records will first be " + "inserted/looked-up at a 1st-level, small, fast map, and then fallback to a " + "2nd-level, larger, slower map when 1st level is full or keys cannot be found. " + "When disabled, records go directly to the 2nd level.") .version("2.3.0") .booleanConf .createWithDefault(true) val ENABLE_TWOLEVEL_AGG_MAP_PARTIAL_ONLY = buildConf("spark.sql.codegen.aggregate.map.twolevel.partialOnly") .internal() .doc("Enable two-level aggregate hash map for partial aggregate only, " + "because final aggregate might get more distinct keys compared to partial aggregate. " + "Overhead of looking up 1st-level map might dominate when having a lot of distinct keys.") .version("3.2.1") .booleanConf .createWithDefault(true) val ENABLE_VECTORIZED_HASH_MAP = buildConf("spark.sql.codegen.aggregate.map.vectorized.enable") .internal() .doc("Enable vectorized aggregate hash map. 
This is for testing/benchmarking only.") .version("3.0.0") .booleanConf .createWithDefault(false) val CODEGEN_SPLIT_AGGREGATE_FUNC = buildConf("spark.sql.codegen.aggregate.splitAggregateFunc.enabled") .internal() .doc("When true, the code generator would split aggregate code into individual methods " + "instead of a single big method. This can be used to avoid oversized function that " + "can miss the opportunity of JIT optimization.") .version("3.0.0") .booleanConf .createWithDefault(true) val ENABLE_SORT_AGGREGATE_CODEGEN = buildConf("spark.sql.codegen.aggregate.sortAggregate.enabled") .internal() .doc("When true, enable code-gen for sort aggregate.") .version("3.3.0") .booleanConf .createWithDefault(true) val ENABLE_FULL_OUTER_SHUFFLED_HASH_JOIN_CODEGEN = buildConf("spark.sql.codegen.join.fullOuterShuffledHashJoin.enabled") .internal() .doc("When true, enable code-gen for FULL OUTER shuffled hash join.") .version("3.3.0") .booleanConf .createWithDefault(true) val ENABLE_FULL_OUTER_SORT_MERGE_JOIN_CODEGEN = buildConf("spark.sql.codegen.join.fullOuterSortMergeJoin.enabled") .internal() .doc("When true, enable code-gen for FULL OUTER sort merge join.") .version("3.3.0") .booleanConf .createWithDefault(true) val ENABLE_EXISTENCE_SORT_MERGE_JOIN_CODEGEN = buildConf("spark.sql.codegen.join.existenceSortMergeJoin.enabled") .internal() .doc("When true, enable code-gen for Existence sort merge join.") .version("3.3.0") .booleanConf .createWithDefault(true) val MAX_NESTED_VIEW_DEPTH = buildConf("spark.sql.view.maxNestedViewDepth") .internal() .doc("The maximum depth of a view reference in a nested view. A nested view may reference " + "other nested views, the dependencies are organized in a directed acyclic graph (DAG). " + "However the DAG depth may become too large and cause unexpected behavior. 
This " + "configuration puts a limit on this: when the depth of a view exceeds this value during " + "analysis, we terminate the resolution to avoid potential errors.") .version("2.2.0") .intConf .checkValue(depth => depth > 0, "The maximum depth of a view reference in a nested view " + "must be positive.") .createWithDefault(100) val ALLOW_PARAMETERLESS_COUNT = buildConf("spark.sql.legacy.allowParameterlessCount") .internal() .doc("When true, the SQL function 'count' is allowed to take no parameters.") .version("3.1.1") .booleanConf .createWithDefault(false) val ALLOW_NON_EMPTY_LOCATION_IN_CTAS = buildConf("spark.sql.legacy.allowNonEmptyLocationInCTAS") .internal() .doc("When false, CTAS with LOCATION throws an analysis exception if the " + "location is not empty.") .version("3.2.0") .booleanConf .createWithDefault(false) val ALLOW_STAR_WITH_SINGLE_TABLE_IDENTIFIER_IN_COUNT = buildConf("spark.sql.legacy.allowStarWithSingleTableIdentifierInCount") .internal() .doc("When true, the SQL function 'count' is allowed to take single 'tblName.*' as parameter") .version("3.2") .booleanConf .createWithDefault(false) val USE_CURRENT_SQL_CONFIGS_FOR_VIEW = buildConf("spark.sql.legacy.useCurrentConfigsForView") .internal() .doc("When true, SQL Configs of the current active SparkSession instead of the captured " + "ones will be applied during the parsing and analysis phases of the view resolution.") .version("3.1.0") .booleanConf .createWithDefault(false) val STORE_ANALYZED_PLAN_FOR_VIEW = buildConf("spark.sql.legacy.storeAnalyzedPlanForView") .internal() .doc("When true, analyzed plan instead of SQL text will be stored when creating " + "temporary view") .version("3.1.0") .booleanConf .createWithDefault(false) val ALLOW_AUTO_GENERATED_ALIAS_FOR_VEW = buildConf("spark.sql.legacy.allowAutoGeneratedAliasForView") .internal() .doc("When true, it's allowed to use a input query without explicit alias when creating " + "a permanent view.") .version("3.2.0") .booleanConf .createWithDefault(false) val STREAMING_FILE_COMMIT_PROTOCOL_CLASS = buildConf("spark.sql.streaming.commitProtocolClass") .version("2.1.0") .internal() .stringConf .createWithDefault("org.apache.spark.sql.execution.streaming.ManifestFileCommitProtocol") val STREAMING_MULTIPLE_WATERMARK_POLICY = buildConf("spark.sql.streaming.multipleWatermarkPolicy") .doc("Policy to calculate the global watermark value when there are multiple watermark " + "operators in a streaming query. The default value is 'min' which chooses " + "the minimum watermark reported across multiple operators. Other alternative value is " + "'max' which chooses the maximum across multiple operators. " + "Note: This configuration cannot be changed between query restarts from the same " + "checkpoint location.") .version("2.4.0") .stringConf .transform(_.toLowerCase(Locale.ROOT)) .checkValue( str => Set("min", "max").contains(str), "Invalid value for 'spark.sql.streaming.multipleWatermarkPolicy'. " + "Valid values are 'min' and 'max'") .createWithDefault("min") // must be same as MultipleWatermarkPolicy.DEFAULT_POLICY_NAME val OBJECT_AGG_SORT_BASED_FALLBACK_THRESHOLD = buildConf("spark.sql.objectHashAggregate.sortBased.fallbackThreshold") .internal() .doc("In the case of ObjectHashAggregateExec, when the size of the in-memory hash map " + "grows too large, we will fall back to sort-based aggregation. 
This option sets a row " + "count threshold for the size of the hash map.") .version("2.2.0") .intConf // We are trying to be conservative and use a relatively small default count threshold here // since the state object of some TypedImperativeAggregate function can be quite large (e.g. // percentile_approx). .createWithDefault(128) val USE_OBJECT_HASH_AGG = buildConf("spark.sql.execution.useObjectHashAggregateExec") .internal() .doc("Decides if we use ObjectHashAggregateExec") .version("2.2.0") .booleanConf .createWithDefault(true) val JSON_GENERATOR_IGNORE_NULL_FIELDS = buildConf("spark.sql.jsonGenerator.ignoreNullFields") .doc("Whether to ignore null fields when generating JSON objects in JSON data source and " + "JSON functions such as to_json. " + "If false, it generates null for null fields in JSON objects.") .version("3.0.0") .booleanConf .createWithDefault(true) val JSON_EXPRESSION_OPTIMIZATION = buildConf("spark.sql.optimizer.enableJsonExpressionOptimization") .doc("Whether to optimize JSON expressions in SQL optimizer. It includes pruning " + "unnecessary columns from from_json, simplifying from_json + to_json, to_json + " + "named_struct(from_json.col1, from_json.col2, ....).") .version("3.1.0") .booleanConf .createWithDefault(true) val CSV_EXPRESSION_OPTIMIZATION = buildConf("spark.sql.optimizer.enableCsvExpressionOptimization") .doc("Whether to optimize CSV expressions in SQL optimizer. It includes pruning " + "unnecessary columns from from_csv.") .version("3.2.0") .booleanConf .createWithDefault(true) val COLLAPSE_PROJECT_ALWAYS_INLINE = buildConf("spark.sql.optimizer.collapseProjectAlwaysInline") .doc("Whether to always collapse two adjacent projections and inline expressions even if " + "it causes extra duplication.") .version("3.3.0") .booleanConf .createWithDefault(false) val FILE_SINK_LOG_DELETION = buildConf("spark.sql.streaming.fileSink.log.deletion") .internal() .doc("Whether to delete the expired log files in file stream sink.") .version("2.0.0") .booleanConf .createWithDefault(true) val FILE_SINK_LOG_COMPACT_INTERVAL = buildConf("spark.sql.streaming.fileSink.log.compactInterval") .internal() .doc("Number of log files after which all the previous files " + "are compacted into the next log file.") .version("2.0.0") .intConf .createWithDefault(10) val FILE_SINK_LOG_CLEANUP_DELAY = buildConf("spark.sql.streaming.fileSink.log.cleanupDelay") .internal() .doc("How long that a file is guaranteed to be visible for all readers.") .version("2.0.0") .timeConf(TimeUnit.MILLISECONDS) .createWithDefault(TimeUnit.MINUTES.toMillis(10)) // 10 minutes val FILE_SOURCE_LOG_DELETION = buildConf("spark.sql.streaming.fileSource.log.deletion") .internal() .doc("Whether to delete the expired log files in file stream source.") .version("2.0.1") .booleanConf .createWithDefault(true) val FILE_SOURCE_LOG_COMPACT_INTERVAL = buildConf("spark.sql.streaming.fileSource.log.compactInterval") .internal() .doc("Number of log files after which all the previous files " + "are compacted into the next log file.") .version("2.0.1") .intConf .createWithDefault(10) val FILE_SOURCE_LOG_CLEANUP_DELAY = buildConf("spark.sql.streaming.fileSource.log.cleanupDelay") .internal() .doc("How long in milliseconds a file is guaranteed to be visible for all readers.") .version("2.0.1") .timeConf(TimeUnit.MILLISECONDS) .createWithDefault(TimeUnit.MINUTES.toMillis(10)) // 10 minutes val FILE_SOURCE_SCHEMA_FORCE_NULLABLE = buildConf("spark.sql.streaming.fileSource.schema.forceNullable") .internal() .doc("When true, force the 
schema of streaming file source to be nullable (including all " + "the fields). Otherwise, the schema might not be compatible with actual data, which " + "leads to corruptions.") .version("3.0.0") .booleanConf .createWithDefault(true) val FILE_SOURCE_CLEANER_NUM_THREADS = buildConf("spark.sql.streaming.fileSource.cleaner.numThreads") .doc("Number of threads used in the file source completed file cleaner.") .version("3.0.0") .intConf .createWithDefault(1) val STREAMING_SCHEMA_INFERENCE = buildConf("spark.sql.streaming.schemaInference") .internal() .doc("Whether file-based streaming sources will infer its own schema") .version("2.0.0") .booleanConf .createWithDefault(false) val STREAMING_POLLING_DELAY = buildConf("spark.sql.streaming.pollingDelay") .internal() .doc("How long to delay polling new data when no data is available") .version("2.0.0") .timeConf(TimeUnit.MILLISECONDS) .createWithDefault(10L) val STREAMING_STOP_TIMEOUT = buildConf("spark.sql.streaming.stopTimeout") .doc("How long to wait in milliseconds for the streaming execution thread to stop when " + "calling the streaming query's stop() method. 0 or negative values wait indefinitely.") .version("3.0.0") .timeConf(TimeUnit.MILLISECONDS) .createWithDefaultString("0") val STREAMING_NO_DATA_PROGRESS_EVENT_INTERVAL = buildConf("spark.sql.streaming.noDataProgressEventInterval") .internal() .doc("How long to wait between two progress events when there is no data") .version("2.1.1") .timeConf(TimeUnit.MILLISECONDS) .createWithDefault(10000L) val STREAMING_NO_DATA_MICRO_BATCHES_ENABLED = buildConf("spark.sql.streaming.noDataMicroBatches.enabled") .doc( "Whether streaming micro-batch engine will execute batches without data " + "for eager state management for stateful streaming queries.") .version("2.4.1") .booleanConf .createWithDefault(true) val STREAMING_METRICS_ENABLED = buildConf("spark.sql.streaming.metricsEnabled") .doc("Whether Dropwizard/Codahale metrics will be reported for active streaming queries.") .version("2.0.2") .booleanConf .createWithDefault(false) val STREAMING_PROGRESS_RETENTION = buildConf("spark.sql.streaming.numRecentProgressUpdates") .doc("The number of progress updates to retain for a streaming query") .version("2.1.1") .intConf .createWithDefault(100) val STREAMING_CHECKPOINT_FILE_MANAGER_CLASS = buildConf("spark.sql.streaming.checkpointFileManagerClass") .internal() .doc("The class used to write checkpoint files atomically. This class must be a subclass " + "of the interface CheckpointFileManager.") .version("2.4.0") .stringConf val STREAMING_CHECKPOINT_ESCAPED_PATH_CHECK_ENABLED = buildConf("spark.sql.streaming.checkpoint.escapedPathCheck.enabled") .internal() .doc("Whether to detect a streaming query may pick up an incorrect checkpoint path due " + "to SPARK-26824.") .version("3.0.0") .booleanConf .createWithDefault(true) val PARALLEL_FILE_LISTING_IN_STATS_COMPUTATION = buildConf("spark.sql.statistics.parallelFileListingInStatsComputation.enabled") .internal() .doc("When true, SQL commands use parallel file listing, " + "as opposed to single thread listing. " + "This usually speeds up commands that need to list many directories.") .version("2.4.1") .booleanConf .createWithDefault(true) val DEFAULT_SIZE_IN_BYTES = buildConf("spark.sql.defaultSizeInBytes") .internal() .doc("The default table size used in query planning. By default, it is set to Long.MaxValue " + s"which is larger than `${AUTO_BROADCASTJOIN_THRESHOLD.key}` to be more conservative. 
" + "That is to say by default the optimizer will not choose to broadcast a table unless it " + "knows for sure its size is small enough.") .version("1.1.0") .bytesConf(ByteUnit.BYTE) .createWithDefault(Long.MaxValue) val ENABLE_FALL_BACK_TO_HDFS_FOR_STATS = buildConf("spark.sql.statistics.fallBackToHdfs") .doc("When true, it will fall back to HDFS if the table statistics are not available from " + "table metadata. This is useful in determining if a table is small enough to use " + "broadcast joins. This flag is effective only for non-partitioned Hive tables. " + "For non-partitioned data source tables, it will be automatically recalculated if table " + "statistics are not available. For partitioned data source and partitioned Hive tables, " + s"It is '${DEFAULT_SIZE_IN_BYTES.key}' if table statistics are not available.") .version("2.0.0") .booleanConf .createWithDefault(false) val NDV_MAX_ERROR = buildConf("spark.sql.statistics.ndv.maxError") .internal() .doc("The maximum relative standard deviation allowed in HyperLogLog++ algorithm " + "when generating column level statistics.") .version("2.1.1") .doubleConf .createWithDefault(0.05) val HISTOGRAM_ENABLED = buildConf("spark.sql.statistics.histogram.enabled") .doc("Generates histograms when computing column statistics if enabled. Histograms can " + "provide better estimation accuracy. Currently, Spark only supports equi-height " + "histogram. Note that collecting histograms takes extra cost. For example, collecting " + "column statistics usually takes only one table scan, but generating equi-height " + "histogram will cause an extra table scan.") .version("2.3.0") .booleanConf .createWithDefault(false) val HISTOGRAM_NUM_BINS = buildConf("spark.sql.statistics.histogram.numBins") .internal() .doc("The number of bins when generating histograms.") .version("2.3.0") .intConf .checkValue(num => num > 1, "The number of bins must be greater than 1.") .createWithDefault(254) val PERCENTILE_ACCURACY = buildConf("spark.sql.statistics.percentile.accuracy") .internal() .doc("Accuracy of percentile approximation when generating equi-height histograms. " + "Larger value means better accuracy. The relative error can be deduced by " + "1.0 / PERCENTILE_ACCURACY.") .version("2.3.0") .intConf .createWithDefault(10000) val AUTO_SIZE_UPDATE_ENABLED = buildConf("spark.sql.statistics.size.autoUpdate.enabled") .doc("Enables automatic update for table size once table's data is changed. 
Note that if " + "the total number of files of the table is very large, this can be expensive and slow " + "down data change commands.") .version("2.3.0") .booleanConf .createWithDefault(false) val CBO_ENABLED = buildConf("spark.sql.cbo.enabled") .doc("Enables CBO for estimation of plan statistics when set true.") .version("2.2.0") .booleanConf .createWithDefault(false) val PLAN_STATS_ENABLED = buildConf("spark.sql.cbo.planStats.enabled") .doc("When true, the logical plan will fetch row counts and column statistics from catalog.") .version("3.0.0") .booleanConf .createWithDefault(false) val JOIN_REORDER_ENABLED = buildConf("spark.sql.cbo.joinReorder.enabled") .doc("Enables join reorder in CBO.") .version("2.2.0") .booleanConf .createWithDefault(false) val JOIN_REORDER_DP_THRESHOLD = buildConf("spark.sql.cbo.joinReorder.dp.threshold") .doc("The maximum number of joined nodes allowed in the dynamic programming algorithm.") .version("2.2.0") .intConf .checkValue(number => number > 0, "The maximum number must be a positive integer.") .createWithDefault(12) val JOIN_REORDER_CARD_WEIGHT = buildConf("spark.sql.cbo.joinReorder.card.weight") .internal() .doc("The weight of the ratio of cardinalities (number of rows) " + "in the cost comparison function. The ratio of sizes in bytes has weight " + "1 - this value. The weighted geometric mean of these ratios is used to decide " + "which of the candidate plans will be chosen by the CBO.") .version("2.2.0") .doubleConf .checkValue(weight => weight >= 0 && weight <= 1, "The weight value must be in [0, 1].") .createWithDefault(0.7) val JOIN_REORDER_DP_STAR_FILTER = buildConf("spark.sql.cbo.joinReorder.dp.star.filter") .doc("Applies star-join filter heuristics to cost based join enumeration.") .version("2.2.0") .booleanConf .createWithDefault(false) val STARSCHEMA_DETECTION = buildConf("spark.sql.cbo.starSchemaDetection") .doc("When true, it enables join reordering based on star schema detection. ") .version("2.2.0") .booleanConf .createWithDefault(false) val STARSCHEMA_FACT_TABLE_RATIO = buildConf("spark.sql.cbo.starJoinFTRatio") .internal() .doc("Specifies the upper limit of the ratio between the largest fact tables" + " for a star join to be considered. ") .version("2.2.0") .doubleConf .createWithDefault(0.9) private def isValidTimezone(zone: String): Boolean = { Try { DateTimeUtils.getZoneId(zone) }.isSuccess } val SESSION_LOCAL_TIMEZONE = buildConf("spark.sql.session.timeZone") .doc("The ID of session local timezone in the format of either region-based zone IDs or " + "zone offsets. Region IDs must have the form 'area/city', such as 'America/Los_Angeles'. " + "Zone offsets must be in the format '(+|-)HH', '(+|-)HH:mm' or '(+|-)HH:mm:ss', e.g '-08', " + "'+01:00' or '-13:33:33'. Also 'UTC' and 'Z' are supported as aliases of '+00:00'. 
Other " + "short names are not recommended to use because they can be ambiguous.") .version("2.2.0") .stringConf .checkValue(isValidTimezone, s"Cannot resolve the given timezone with" + " ZoneId.of(_, ZoneId.SHORT_IDS)") .createWithDefaultFunction(() => TimeZone.getDefault.getID) val WINDOW_EXEC_BUFFER_IN_MEMORY_THRESHOLD = buildConf("spark.sql.windowExec.buffer.in.memory.threshold") .internal() .doc("Threshold for number of rows guaranteed to be held in memory by the window operator") .version("2.2.1") .intConf .createWithDefault(4096) val WINDOW_EXEC_BUFFER_SPILL_THRESHOLD = buildConf("spark.sql.windowExec.buffer.spill.threshold") .internal() .doc("Threshold for number of rows to be spilled by window operator") .version("2.2.0") .intConf .createWithDefault(SHUFFLE_SPILL_NUM_ELEMENTS_FORCE_SPILL_THRESHOLD.defaultValue.get) val SESSION_WINDOW_BUFFER_IN_MEMORY_THRESHOLD = buildConf("spark.sql.sessionWindow.buffer.in.memory.threshold") .internal() .doc("Threshold for number of windows guaranteed to be held in memory by the " + "session window operator. Note that the buffer is used only for the query Spark " + "cannot apply aggregations on determining session window.") .version("3.2.0") .intConf .createWithDefault(4096) val SESSION_WINDOW_BUFFER_SPILL_THRESHOLD = buildConf("spark.sql.sessionWindow.buffer.spill.threshold") .internal() .doc("Threshold for number of rows to be spilled by window operator. Note that " + "the buffer is used only for the query Spark cannot apply aggregations on determining " + "session window.") .version("3.2.0") .intConf .createWithDefault(SHUFFLE_SPILL_NUM_ELEMENTS_FORCE_SPILL_THRESHOLD.defaultValue.get) val SORT_MERGE_JOIN_EXEC_BUFFER_IN_MEMORY_THRESHOLD = buildConf("spark.sql.sortMergeJoinExec.buffer.in.memory.threshold") .internal() .doc("Threshold for number of rows guaranteed to be held in memory by the sort merge " + "join operator") .version("2.2.1") .intConf .createWithDefault(ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH) val SORT_MERGE_JOIN_EXEC_BUFFER_SPILL_THRESHOLD = buildConf("spark.sql.sortMergeJoinExec.buffer.spill.threshold") .internal() .doc("Threshold for number of rows to be spilled by sort merge join operator") .version("2.2.0") .intConf .createWithDefault(SHUFFLE_SPILL_NUM_ELEMENTS_FORCE_SPILL_THRESHOLD.defaultValue.get) val CARTESIAN_PRODUCT_EXEC_BUFFER_IN_MEMORY_THRESHOLD = buildConf("spark.sql.cartesianProductExec.buffer.in.memory.threshold") .internal() .doc("Threshold for number of rows guaranteed to be held in memory by the cartesian " + "product operator") .version("2.2.1") .intConf .createWithDefault(4096) val CARTESIAN_PRODUCT_EXEC_BUFFER_SPILL_THRESHOLD = buildConf("spark.sql.cartesianProductExec.buffer.spill.threshold") .internal() .doc("Threshold for number of rows to be spilled by cartesian product operator") .version("2.2.0") .intConf .createWithDefault(SHUFFLE_SPILL_NUM_ELEMENTS_FORCE_SPILL_THRESHOLD.defaultValue.get) val SUPPORT_QUOTED_REGEX_COLUMN_NAME = buildConf("spark.sql.parser.quotedRegexColumnNames") .doc("When true, quoted Identifiers (using backticks) in SELECT statement are interpreted" + " as regular expressions.") .version("2.3.0") .booleanConf .createWithDefault(false) val RANGE_EXCHANGE_SAMPLE_SIZE_PER_PARTITION = buildConf("spark.sql.execution.rangeExchange.sampleSizePerPartition") .internal() .doc("Number of points to sample per partition in order to determine the range boundaries" + " for range partitioning, typically used in global sorting (without limit).") .version("2.3.0") .intConf .createWithDefault(100) val 
ARROW_EXECUTION_ENABLED = buildConf("spark.sql.execution.arrow.enabled") .doc("(Deprecated since Spark 3.0, please set 'spark.sql.execution.arrow.pyspark.enabled'.)") .version("2.3.0") .booleanConf .createWithDefault(false) val ARROW_PYSPARK_EXECUTION_ENABLED = buildConf("spark.sql.execution.arrow.pyspark.enabled") .doc("When true, make use of Apache Arrow for columnar data transfers in PySpark. " + "This optimization applies to: " + "1. pyspark.sql.DataFrame.toPandas " + "2. pyspark.sql.SparkSession.createDataFrame when its input is a Pandas DataFrame " + "The following data types are unsupported: " + "ArrayType of TimestampType, and nested StructType.") .version("3.0.0") .fallbackConf(ARROW_EXECUTION_ENABLED) val ARROW_PYSPARK_SELF_DESTRUCT_ENABLED = buildConf("spark.sql.execution.arrow.pyspark.selfDestruct.enabled") .doc("(Experimental) When true, make use of Apache Arrow's self-destruct and split-blocks " + "options for columnar data transfers in PySpark, when converting from Arrow to Pandas. " + "This reduces memory usage at the cost of some CPU time. " + "This optimization applies to: pyspark.sql.DataFrame.toPandas " + "when 'spark.sql.execution.arrow.pyspark.enabled' is set.") .version("3.2.0") .booleanConf .createWithDefault(false) val PYSPARK_JVM_STACKTRACE_ENABLED = buildConf("spark.sql.pyspark.jvmStacktrace.enabled") .doc("When true, it shows the JVM stacktrace in the user-facing PySpark exception " + "together with Python stacktrace. By default, it is disabled and hides JVM stacktrace " + "and shows a Python-friendly exception only.") .version("3.0.0") .booleanConf // show full stacktrace in tests but hide in production by default. .createWithDefault(Utils.isTesting) val ARROW_SPARKR_EXECUTION_ENABLED = buildConf("spark.sql.execution.arrow.sparkr.enabled") .doc("When true, make use of Apache Arrow for columnar data transfers in SparkR. " + "This optimization applies to: " + "1. createDataFrame when its input is an R DataFrame " + "2. collect " + "3. dapply " + "4. gapply " + "The following data types are unsupported: " + "FloatType, BinaryType, ArrayType, StructType and MapType.") .version("3.0.0") .booleanConf .createWithDefault(false) val ARROW_FALLBACK_ENABLED = buildConf("spark.sql.execution.arrow.fallback.enabled") .doc("(Deprecated since Spark 3.0, please set " + "'spark.sql.execution.arrow.pyspark.fallback.enabled'.)") .version("2.4.0") .booleanConf .createWithDefault(true) val ARROW_PYSPARK_FALLBACK_ENABLED = buildConf("spark.sql.execution.arrow.pyspark.fallback.enabled") .doc(s"When true, optimizations enabled by '${ARROW_PYSPARK_EXECUTION_ENABLED.key}' will " + "fallback automatically to non-optimized implementations if an error occurs.") .version("3.0.0") .fallbackConf(ARROW_FALLBACK_ENABLED) val ARROW_EXECUTION_MAX_RECORDS_PER_BATCH = buildConf("spark.sql.execution.arrow.maxRecordsPerBatch") .doc("When using Apache Arrow, limit the maximum number of records that can be written " + "to a single ArrowRecordBatch in memory. If set to zero or negative there is no limit.") .version("2.3.0") .intConf .createWithDefault(10000) val PANDAS_UDF_BUFFER_SIZE = buildConf("spark.sql.execution.pandas.udf.buffer.size") .doc( s"Same as `${BUFFER_SIZE.key}` but only applies to Pandas UDF executions. If it is not " + s"set, the fallback is `${BUFFER_SIZE.key}`. Note that Pandas execution requires more " + "than 4 bytes. Lowering this value could make small Pandas UDF batch iterated and " + "pipelined; however, it might degrade performance. 
See SPARK-27870.") .version("3.0.0") .fallbackConf(BUFFER_SIZE) val PYSPARK_SIMPLIFIEID_TRACEBACK = buildConf("spark.sql.execution.pyspark.udf.simplifiedTraceback.enabled") .doc( "When true, the traceback from Python UDFs is simplified. It hides " + "the Python worker, (de)serialization, etc from PySpark in tracebacks, and only " + "shows the exception messages from UDFs. Note that this works only with CPython 3.7+.") .version("3.1.0") .booleanConf // show full stacktrace in tests but hide in production by default. .createWithDefault(!Utils.isTesting) val PANDAS_GROUPED_MAP_ASSIGN_COLUMNS_BY_NAME = buildConf("spark.sql.legacy.execution.pandas.groupedMap.assignColumnsByName") .internal() .doc("When true, columns will be looked up by name if labeled with a string and fallback " + "to use position if not. When false, a grouped map Pandas UDF will assign columns from " + "the returned Pandas DataFrame based on position, regardless of column label type. " + "This configuration will be deprecated in future releases.") .version("2.4.1") .booleanConf .createWithDefault(true) val PANDAS_ARROW_SAFE_TYPE_CONVERSION = buildConf("spark.sql.execution.pandas.convertToArrowArraySafely") .internal() .doc("When true, Arrow will perform safe type conversion when converting " + "Pandas.Series to Arrow array during serialization. Arrow will raise errors " + "when detecting unsafe type conversion like overflow. When false, disabling Arrow's type " + "check and do type conversions anyway. This config only works for Arrow 0.11.0+.") .version("3.0.0") .booleanConf .createWithDefault(false) val REPLACE_EXCEPT_WITH_FILTER = buildConf("spark.sql.optimizer.replaceExceptWithFilter") .internal() .doc("When true, the apply function of the rule verifies whether the right node of the" + " except operation is of type Filter or Project followed by Filter. If yes, the rule" + " further verifies 1) Excluding the filter operations from the right (as well as the" + " left node, if any) on the top, whether both the nodes evaluates to a same result." + " 2) The left and right nodes don't contain any SubqueryExpressions. 3) The output" + " column names of the left node are distinct. If all the conditions are met, the" + " rule will replace the except operation with a Filter by flipping the filter" + " condition(s) of the right node.") .version("2.3.0") .booleanConf .createWithDefault(true) val DECIMAL_OPERATIONS_ALLOW_PREC_LOSS = buildConf("spark.sql.decimalOperations.allowPrecisionLoss") .internal() .doc("When true (default), establishing the result type of an arithmetic operation " + "happens according to Hive behavior and SQL ANSI 2011 specification, i.e. rounding the " + "decimal part of the result if an exact representation is not possible. Otherwise, NULL " + "is returned in those cases, as previously.") .version("2.3.1") .booleanConf .createWithDefault(true) val LITERAL_PICK_MINIMUM_PRECISION = buildConf("spark.sql.legacy.literal.pickMinimumPrecision") .internal() .doc("When integral literal is used in decimal operations, pick a minimum precision " + "required by the literal if this config is true, to make the resulting precision and/or " + "scale smaller. This can reduce the possibility of precision lose and/or overflow.") .version("2.3.3") .booleanConf .createWithDefault(true) val SQL_OPTIONS_REDACTION_PATTERN = buildConf("spark.sql.redaction.options.regex") .doc("Regex to decide which keys in a Spark SQL command's options map contain sensitive " + "information. 
The values of options whose names that match this regex will be redacted " + "in the explain output. This redaction is applied on top of the global redaction " + s"configuration defined by ${SECRET_REDACTION_PATTERN.key}.") .version("2.2.2") .regexConf .createWithDefault("(?i)url".r) val SQL_STRING_REDACTION_PATTERN = buildConf("spark.sql.redaction.string.regex") .doc("Regex to decide which parts of strings produced by Spark contain sensitive " + "information. When this regex matches a string part, that string part is replaced by a " + "dummy value. This is currently used to redact the output of SQL explain commands. " + "When this conf is not set, the value from `spark.redaction.string.regex` is used.") .version("2.3.0") .fallbackConf(org.apache.spark.internal.config.STRING_REDACTION_PATTERN) val CONCAT_BINARY_AS_STRING = buildConf("spark.sql.function.concatBinaryAsString") .doc("When this option is set to false and all inputs are binary, `functions.concat` returns " + "an output as binary. Otherwise, it returns as a string.") .version("2.3.0") .booleanConf .createWithDefault(false) val ELT_OUTPUT_AS_STRING = buildConf("spark.sql.function.eltOutputAsString") .doc("When this option is set to false and all inputs are binary, `elt` returns " + "an output as binary. Otherwise, it returns as a string.") .version("2.3.0") .booleanConf .createWithDefault(false) val VALIDATE_PARTITION_COLUMNS = buildConf("spark.sql.sources.validatePartitionColumns") .internal() .doc("When this option is set to true, partition column values will be validated with " + "user-specified schema. If the validation fails, a runtime exception is thrown. " + "When this option is set to false, the partition column value will be converted to null " + "if it can not be casted to corresponding user-specified schema.") .version("3.0.0") .booleanConf .createWithDefault(true) val CONTINUOUS_STREAMING_EPOCH_BACKLOG_QUEUE_SIZE = buildConf("spark.sql.streaming.continuous.epochBacklogQueueSize") .doc("The max number of entries to be stored in queue to wait for late epochs. " + "If this parameter is exceeded by the size of the queue, stream will stop with an error.") .version("3.0.0") .intConf .createWithDefault(10000) val CONTINUOUS_STREAMING_EXECUTOR_QUEUE_SIZE = buildConf("spark.sql.streaming.continuous.executorQueueSize") .internal() .doc("The size (measured in number of rows) of the queue used in continuous execution to" + " buffer the results of a ContinuousDataReader.") .version("2.3.0") .intConf .createWithDefault(1024) val CONTINUOUS_STREAMING_EXECUTOR_POLL_INTERVAL_MS = buildConf("spark.sql.streaming.continuous.executorPollIntervalMs") .internal() .doc("The interval at which continuous execution readers will poll to check whether" + " the epoch has advanced on the driver.") .version("2.3.0") .timeConf(TimeUnit.MILLISECONDS) .createWithDefault(100) val USE_V1_SOURCE_LIST = buildConf("spark.sql.sources.useV1SourceList") .internal() .doc("A comma-separated list of data source short names or fully qualified data source " + "implementation class names for which Data Source V2 code path is disabled. These data " + "sources will fallback to Data Source V1 code path.") .version("3.0.0") .stringConf .createWithDefault("avro,csv,json,kafka,orc,parquet,text") val DISABLED_V2_STREAMING_WRITERS = buildConf("spark.sql.streaming.disabledV2Writers") .doc("A comma-separated list of fully qualified data source register class names for which" + " StreamWriteSupport is disabled. 
Writes to these sources will fall back to the V1 Sinks.") .version("2.3.1") .stringConf .createWithDefault("") val DISABLED_V2_STREAMING_MICROBATCH_READERS = buildConf("spark.sql.streaming.disabledV2MicroBatchReaders") .internal() .doc( "A comma-separated list of fully qualified data source register class names for which " + "MicroBatchReadSupport is disabled. Reads from these sources will fall back to the " + "V1 Sources.") .version("2.4.0") .stringConf .createWithDefault("") val FASTFAIL_ON_FILEFORMAT_OUTPUT = buildConf("spark.sql.execution.fastFailOnFileFormatOutput") .internal() .doc("Whether to fast fail task execution when writing output to FileFormat datasource. " + "If this is enabled, in `FileFormatWriter` we will catch `FileAlreadyExistsException` " + "and fast fail output task without further task retry. Only enabling this if you know " + "the `FileAlreadyExistsException` of the output task is unrecoverable, i.e., further " + "task attempts won't be able to success. If the `FileAlreadyExistsException` might be " + "recoverable, you should keep this as disabled and let Spark to retry output tasks. " + "This is disabled by default.") .version("3.0.2") .booleanConf .createWithDefault(false) object PartitionOverwriteMode extends Enumeration { val STATIC, DYNAMIC = Value } val PARTITION_OVERWRITE_MODE = buildConf("spark.sql.sources.partitionOverwriteMode") .doc("When INSERT OVERWRITE a partitioned data source table, we currently support 2 modes: " + "static and dynamic. In static mode, Spark deletes all the partitions that match the " + "partition specification(e.g. PARTITION(a=1,b)) in the INSERT statement, before " + "overwriting. In dynamic mode, Spark doesn't delete partitions ahead, and only overwrite " + "those partitions that have data written into it at runtime. By default we use static " + "mode to keep the same behavior of Spark prior to 2.3. Note that this config doesn't " + "affect Hive serde tables, as they are always overwritten with dynamic mode. This can " + "also be set as an output option for a data source using key partitionOverwriteMode " + "(which takes precedence over this setting), e.g. " + "dataframe.write.option(\\"partitionOverwriteMode\\", \\"dynamic\\").save(path)." ) .version("2.3.0") .stringConf .transform(_.toUpperCase(Locale.ROOT)) .checkValues(PartitionOverwriteMode.values.map(_.toString)) .createWithDefault(PartitionOverwriteMode.STATIC.toString) object StoreAssignmentPolicy extends Enumeration { val ANSI, LEGACY, STRICT = Value } val STORE_ASSIGNMENT_POLICY = buildConf("spark.sql.storeAssignmentPolicy") .doc("When inserting a value into a column with different data type, Spark will perform " + "type coercion. Currently, we support 3 policies for the type coercion rules: ANSI, " + "legacy and strict. With ANSI policy, Spark performs the type coercion as per ANSI SQL. " + "In practice, the behavior is mostly the same as PostgreSQL. " + "It disallows certain unreasonable type conversions such as converting " + "`string` to `int` or `double` to `boolean`. " + "With legacy policy, Spark allows the type coercion as long as it is a valid `Cast`, " + "which is very loose. e.g. converting `string` to `int` or `double` to `boolean` is " + "allowed. It is also the only behavior in Spark 2.x and it is compatible with Hive. " + "With strict policy, Spark doesn't allow any possible precision loss or data truncation " + "in type coercion, e.g. converting `double` to `int` or `decimal` to `double` is " + "not allowed." 
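      // Illustrative usage sketch (editorial note, not part of the original Spark source):
      // both `spark.sql.sources.partitionOverwriteMode` and `spark.sql.storeAssignmentPolicy`
      // defined above are ordinary runtime SQL configs. Assuming a live SparkSession named
      // `spark`, a DataFrame `df`, and a partitioned table `target_partitioned_table`
      // (all hypothetical names), they could be exercised roughly like this:
      //
      //   spark.conf.set("spark.sql.storeAssignmentPolicy", "ANSI")
      //   df.write
      //     .mode("overwrite")
      //     .option("partitionOverwriteMode", "dynamic") // per-write override of the session config
      //     .insertInto("target_partitioned_table")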
) .version("3.0.0") .stringConf .transform(_.toUpperCase(Locale.ROOT)) .checkValues(StoreAssignmentPolicy.values.map(_.toString)) .createWithDefault(StoreAssignmentPolicy.ANSI.toString) val ANSI_ENABLED = buildConf("spark.sql.ansi.enabled") .doc("When true, Spark SQL uses an ANSI compliant dialect instead of being Hive compliant. " + "For example, Spark will throw an exception at runtime instead of returning null results " + "when the inputs to a SQL operator/function are invalid." + "For full details of this dialect, you can find them in the section \\"ANSI Compliance\\" of " + "Spark's documentation. Some ANSI dialect features may be not from the ANSI SQL " + "standard directly, but their behaviors align with ANSI SQL's style") .version("3.0.0") .booleanConf .createWithDefault(sys.env.get("SPARK_ANSI_SQL_MODE").contains("true")) val ENFORCE_RESERVED_KEYWORDS = buildConf("spark.sql.ansi.enforceReservedKeywords") .doc(s"When true and '${ANSI_ENABLED.key}' is true, the Spark SQL parser enforces the ANSI " + "reserved keywords and forbids SQL queries that use reserved keywords as alias names " + "and/or identifiers for table, view, function, etc.") .version("3.3.0") .booleanConf .createWithDefault(false) val ANSI_STRICT_INDEX_OPERATOR = buildConf("spark.sql.ansi.strictIndexOperator") .doc(s"When true and '${ANSI_ENABLED.key}' is true, accessing complex SQL types via [] " + "operator will throw an exception if array index is out of bound, or map key does not " + "exist. Otherwise, Spark will return a null result when accessing an invalid index.") .version("3.3.0") .booleanConf .createWithDefault(true) val SORT_BEFORE_REPARTITION = buildConf("spark.sql.execution.sortBeforeRepartition") .internal() .doc("When perform a repartition following a shuffle, the output row ordering would be " + "nondeterministic. If some downstream stages fail and some tasks of the repartition " + "stage retry, these tasks may generate different data, and that can lead to correctness " + "issues. Turn on this config to insert a local sort before actually doing repartition " + "to generate consistent repartition results. The performance of repartition() may go " + "down since we insert extra local sort before it.") .version("2.1.4") .booleanConf .createWithDefault(true) val NESTED_SCHEMA_PRUNING_ENABLED = buildConf("spark.sql.optimizer.nestedSchemaPruning.enabled") .internal() .doc("Prune nested fields from a logical relation's output which are unnecessary in " + "satisfying a query. This optimization allows columnar file format readers to avoid " + "reading unnecessary nested column data. Currently Parquet and ORC are the " + "data sources that implement this optimization.") .version("2.4.1") .booleanConf .createWithDefault(true) val DISABLE_HINTS = buildConf("spark.sql.optimizer.disableHints") .internal() .doc("When true, the optimizer will disable user-specified hints that are additional " + "directives for better planning of a query.") .version("3.1.0") .booleanConf .createWithDefault(false) val NESTED_PREDICATE_PUSHDOWN_FILE_SOURCE_LIST = buildConf("spark.sql.optimizer.nestedPredicatePushdown.supportedFileSources") .internal() .doc("A comma-separated list of data source short names or fully qualified data source " + "implementation class names for which Spark tries to push down predicates for nested " + "columns and/or names containing `dots` to data sources. This configuration is only " + "effective with file-based data sources in DSv1. Currently, Parquet and ORC implement " + "both optimizations. 
The other data sources don't support this feature yet. So the " + "default value is 'parquet,orc'.") .version("3.0.0") .stringConf .createWithDefault("parquet,orc") val SERIALIZER_NESTED_SCHEMA_PRUNING_ENABLED = buildConf("spark.sql.optimizer.serializer.nestedSchemaPruning.enabled") .internal() .doc("Prune nested fields from object serialization operator which are unnecessary in " + "satisfying a query. This optimization allows object serializers to avoid " + "executing unnecessary nested expressions.") .version("3.0.0") .booleanConf .createWithDefault(true) val NESTED_PRUNING_ON_EXPRESSIONS = buildConf("spark.sql.optimizer.expression.nestedPruning.enabled") .internal() .doc("Prune nested fields from expressions in an operator which are unnecessary in " + "satisfying a query. Note that this optimization doesn't prune nested fields from " + "physical data source scanning. For pruning nested fields from scanning, please use " + "`spark.sql.optimizer.nestedSchemaPruning.enabled` config.") .version("3.0.0") .booleanConf .createWithDefault(true) val DECORRELATE_INNER_QUERY_ENABLED = buildConf("spark.sql.optimizer.decorrelateInnerQuery.enabled") .internal() .doc("Decorrelate inner query by eliminating correlated references and build domain joins.") .version("3.2.0") .booleanConf .createWithDefault(true) val OPTIMIZE_ONE_ROW_RELATION_SUBQUERY = buildConf("spark.sql.optimizer.optimizeOneRowRelationSubquery") .internal() .doc("When true, the optimizer will inline subqueries with OneRowRelation as leaf nodes.") .version("3.2.0") .booleanConf .createWithDefault(true) val TOP_K_SORT_FALLBACK_THRESHOLD = buildConf("spark.sql.execution.topKSortFallbackThreshold") .doc("In SQL queries with a SORT followed by a LIMIT like " + "'SELECT x FROM t ORDER BY y LIMIT m', if m is under this threshold, do a top-K sort" + " in memory, otherwise do a global sort which spills to disk if necessary.") .version("2.4.0") .intConf .createWithDefault(ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH) object Deprecated { val MAPRED_REDUCE_TASKS = "mapred.reduce.tasks" } object Replaced { val MAPREDUCE_JOB_REDUCES = "mapreduce.job.reduces" } val CSV_PARSER_COLUMN_PRUNING = buildConf("spark.sql.csv.parser.columnPruning.enabled") .internal() .doc("If it is set to true, column names of the requested schema are passed to CSV parser. " + "Other column values can be ignored during parsing even if they are malformed.") .version("2.4.0") .booleanConf .createWithDefault(true) val CSV_INPUT_BUFFER_SIZE = buildConf("spark.sql.csv.parser.inputBufferSize") .internal() .doc("If it is set, it configures the buffer size of CSV input during parsing. " + "It is the same as inputBufferSize option in CSV which has a higher priority. " + "Note that this is a workaround for the parsing library's regression, and this " + "configuration is internal and supposed to be removed in the near future.") .version("3.0.3") .intConf .createOptional val REPL_EAGER_EVAL_ENABLED = buildConf("spark.sql.repl.eagerEval.enabled") .doc("Enables eager evaluation or not. When true, the top K rows of Dataset will be " + "displayed if and only if the REPL supports the eager evaluation. Currently, the " + "eager evaluation is supported in PySpark and SparkR. In PySpark, for the notebooks like " + "Jupyter, the HTML table (generated by _repr_html_) will be returned. For plain Python " + "REPL, the returned outputs are formatted like dataframe.show(). 
In SparkR, the returned " + "outputs are showed similar to R data.frame would.") .version("2.4.0") .booleanConf .createWithDefault(false) val REPL_EAGER_EVAL_MAX_NUM_ROWS = buildConf("spark.sql.repl.eagerEval.maxNumRows") .doc("The max number of rows that are returned by eager evaluation. This only takes " + s"effect when ${REPL_EAGER_EVAL_ENABLED.key} is set to true. The valid range of this " + "config is from 0 to (Int.MaxValue - 1), so the invalid config like negative and " + "greater than (Int.MaxValue - 1) will be normalized to 0 and (Int.MaxValue - 1).") .version("2.4.0") .intConf .createWithDefault(20) val REPL_EAGER_EVAL_TRUNCATE = buildConf("spark.sql.repl.eagerEval.truncate") .doc("The max number of characters for each cell that is returned by eager evaluation. " + s"This only takes effect when ${REPL_EAGER_EVAL_ENABLED.key} is set to true.") .version("2.4.0") .intConf .createWithDefault(20) val FAST_HASH_AGGREGATE_MAX_ROWS_CAPACITY_BIT = buildConf("spark.sql.codegen.aggregate.fastHashMap.capacityBit") .internal() .doc("Capacity for the max number of rows to be held in memory " + "by the fast hash aggregate product operator. The bit is not for actual value, " + "but the actual numBuckets is determined by loadFactor " + "(e.g: default bit value 16 , the actual numBuckets is ((1 << 16) / 0.5).") .version("2.4.0") .intConf .checkValue(bit => bit >= 10 && bit <= 30, "The bit value must be in [10, 30].") .createWithDefault(16) val AVRO_COMPRESSION_CODEC = buildConf("spark.sql.avro.compression.codec") .doc("Compression codec used in writing of AVRO files. Supported codecs: " + "uncompressed, deflate, snappy, bzip2, xz and zstandard. Default codec is snappy.") .version("2.4.0") .stringConf .checkValues(Set("uncompressed", "deflate", "snappy", "bzip2", "xz", "zstandard")) .createWithDefault("snappy") val AVRO_DEFLATE_LEVEL = buildConf("spark.sql.avro.deflate.level") .doc("Compression level for the deflate codec used in writing of AVRO files. " + "Valid value must be in the range of from 1 to 9 inclusive or -1. " + "The default value is -1 which corresponds to 6 level in the current implementation.") .version("2.4.0") .intConf .checkValues((1 to 9).toSet + Deflater.DEFAULT_COMPRESSION) .createWithDefault(Deflater.DEFAULT_COMPRESSION) val LEGACY_SIZE_OF_NULL = buildConf("spark.sql.legacy.sizeOfNull") .internal() .doc(s"If it is set to false, or ${ANSI_ENABLED.key} is true, then size of null returns " + "null. Otherwise, it returns -1, which was inherited from Hive.") .version("2.4.0") .booleanConf .createWithDefault(true) val LEGACY_PARSE_NULL_PARTITION_SPEC_AS_STRING_LITERAL = buildConf("spark.sql.legacy.parseNullPartitionSpecAsStringLiteral") .internal() .doc("If it is set to true, `PARTITION(col=null)` is parsed as a string literal of its " + "text representation, e.g., string 'null', when the partition column is string type. 
" + "Otherwise, it is always parsed as a null literal in the partition spec.") .version("3.0.2") .booleanConf .createWithDefault(false) val LEGACY_REPLACE_DATABRICKS_SPARK_AVRO_ENABLED = buildConf("spark.sql.legacy.replaceDatabricksSparkAvro.enabled") .internal() .doc("If it is set to true, the data source provider com.databricks.spark.avro is mapped " + "to the built-in but external Avro data source module for backward compatibility.") .version("2.4.0") .booleanConf .createWithDefault(true) val LEGACY_SETOPS_PRECEDENCE_ENABLED = buildConf("spark.sql.legacy.setopsPrecedence.enabled") .internal() .doc("When set to true and the order of evaluation is not specified by parentheses, the " + "set operations are performed from left to right as they appear in the query. When set " + "to false and order of evaluation is not specified by parentheses, INTERSECT operations " + "are performed before any UNION, EXCEPT and MINUS operations.") .version("2.4.0") .booleanConf .createWithDefault(false) val LEGACY_EXPONENT_LITERAL_AS_DECIMAL_ENABLED = buildConf("spark.sql.legacy.exponentLiteralAsDecimal.enabled") .internal() .doc("When set to true, a literal with an exponent (e.g. 1E-30) would be parsed " + "as Decimal rather than Double.") .version("3.0.0") .booleanConf .createWithDefault(false) val LEGACY_ALLOW_NEGATIVE_SCALE_OF_DECIMAL_ENABLED = buildConf("spark.sql.legacy.allowNegativeScaleOfDecimal") .internal() .doc("When set to true, negative scale of Decimal type is allowed. For example, " + "the type of number 1E10BD under legacy mode is DecimalType(2, -9), but is " + "Decimal(11, 0) in non legacy mode.") .version("3.0.0") .booleanConf .createWithDefault(false) val LEGACY_BUCKETED_TABLE_SCAN_OUTPUT_ORDERING = buildConf("spark.sql.legacy.bucketedTableScan.outputOrdering") .internal() .doc("When true, the bucketed table scan will list files during planning to figure out the " + "output ordering, which is expensive and may make the planning quite slow.") .version("3.0.0") .booleanConf .createWithDefault(false) val LEGACY_HAVING_WITHOUT_GROUP_BY_AS_WHERE = buildConf("spark.sql.legacy.parser.havingWithoutGroupByAsWhere") .internal() .doc("If it is set to true, the parser will treat HAVING without GROUP BY as a normal " + "WHERE, which does not follow SQL standard.") .version("2.4.1") .booleanConf .createWithDefault(false) val LEGACY_ALLOW_EMPTY_STRING_IN_JSON = buildConf("spark.sql.legacy.json.allowEmptyString.enabled") .internal() .doc("When set to true, the parser of JSON data source treats empty strings as null for " + "some data types such as `IntegerType`.") .version("3.0.0") .booleanConf .createWithDefault(false) val LEGACY_CREATE_EMPTY_COLLECTION_USING_STRING_TYPE = buildConf("spark.sql.legacy.createEmptyCollectionUsingStringType") .internal() .doc("When set to true, Spark returns an empty collection with `StringType` as element " + "type if the `array`/`map` function is called without any parameters. Otherwise, Spark " + "returns an empty collection with `NullType` as element type.") .version("3.0.0") .booleanConf .createWithDefault(false) val LEGACY_ALLOW_UNTYPED_SCALA_UDF = buildConf("spark.sql.legacy.allowUntypedScalaUDF") .internal() .doc("When set to true, user is allowed to use org.apache.spark.sql.functions." + "udf(f: AnyRef, dataType: DataType). 
Otherwise, an exception will be thrown at runtime.") .version("3.0.0") .booleanConf .createWithDefault(false) val LEGACY_STATISTICAL_AGGREGATE = buildConf("spark.sql.legacy.statisticalAggregate") .internal() .doc("When set to true, statistical aggregate function returns Double.NaN " + "if divide by zero occurred during expression evaluation, otherwise, it returns null. " + "Before version 3.1.0, it returns NaN in divideByZero case by default.") .version("3.1.0") .booleanConf .createWithDefault(false) val TRUNCATE_TABLE_IGNORE_PERMISSION_ACL = buildConf("spark.sql.truncateTable.ignorePermissionAcl.enabled") .internal() .doc("When set to true, TRUNCATE TABLE command will not try to set back original " + "permission and ACLs when re-creating the table/partition paths.") .version("2.4.6") .booleanConf .createWithDefault(false) val NAME_NON_STRUCT_GROUPING_KEY_AS_VALUE = buildConf("spark.sql.legacy.dataset.nameNonStructGroupingKeyAsValue") .internal() .doc("When set to true, the key attribute resulted from running `Dataset.groupByKey` " + "for non-struct key type, will be named as `value`, following the behavior of Spark " + "version 2.4 and earlier.") .version("3.0.0") .booleanConf .createWithDefault(false) val MAX_TO_STRING_FIELDS = buildConf("spark.sql.debug.maxToStringFields") .doc("Maximum number of fields of sequence-like entries can be converted to strings " + "in debug output. Any elements beyond the limit will be dropped and replaced by a" + """ "... N more fields" placeholder.""") .version("3.0.0") .intConf .createWithDefault(25) val MAX_PLAN_STRING_LENGTH = buildConf("spark.sql.maxPlanStringLength") .doc("Maximum number of characters to output for a plan string. If the plan is " + "longer, further output will be truncated. The default setting always generates a full " + "plan. Set this to a lower value such as 8k if plan strings are taking up too much " + "memory or are causing OutOfMemory errors in the driver or UI processes.") .version("3.0.0") .bytesConf(ByteUnit.BYTE) .checkValue(i => i >= 0 && i <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH, "Invalid " + "value for 'spark.sql.maxPlanStringLength'. Length must be a valid string length " + "(nonnegative and shorter than the maximum size).") .createWithDefaultString(s"${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}") val MAX_METADATA_STRING_LENGTH = buildConf("spark.sql.maxMetadataStringLength") .doc("Maximum number of characters to output for a metadata string. e.g. " + "file location in `DataSourceScanExec`, every value will be abbreviated if exceed length.") .version("3.1.0") .intConf .checkValue(_ > 3, "This value must be bigger than 3.") .createWithDefault(100) val SET_COMMAND_REJECTS_SPARK_CORE_CONFS = buildConf("spark.sql.legacy.setCommandRejectsSparkCoreConfs") .internal() .doc("If it is set to true, SET command will fail when the key is registered as " + "a SparkConf entry.") .version("3.0.0") .booleanConf .createWithDefault(true) object TimestampTypes extends Enumeration { val TIMESTAMP_NTZ, TIMESTAMP_LTZ = Value } val TIMESTAMP_TYPE = buildConf("spark.sql.timestampType") .doc("Configures the default timestamp type of Spark SQL, including SQL DDL, Cast clause " + s"and type literal. Setting the configuration as ${TimestampTypes.TIMESTAMP_NTZ} will " + "use TIMESTAMP WITHOUT TIME ZONE as the default type while putting it as " + s"${TimestampTypes.TIMESTAMP_LTZ} will use TIMESTAMP WITH LOCAL TIME ZONE. 
" + "Before the 3.3.0 release, Spark only supports the TIMESTAMP WITH " + "LOCAL TIME ZONE type.") .version("3.3.0") .stringConf .transform(_.toUpperCase(Locale.ROOT)) .checkValues(TimestampTypes.values.map(_.toString)) .createWithDefault(TimestampTypes.TIMESTAMP_LTZ.toString) val DATETIME_JAVA8API_ENABLED = buildConf("spark.sql.datetime.java8API.enabled") .doc("If the configuration property is set to true, java.time.Instant and " + "java.time.LocalDate classes of Java 8 API are used as external types for " + "Catalyst's TimestampType and DateType. If it is set to false, java.sql.Timestamp " + "and java.sql.Date are used for the same purpose.") .version("3.0.0") .booleanConf .createWithDefault(false) val UI_EXPLAIN_MODE = buildConf("spark.sql.ui.explainMode") .doc("Configures the query explain mode used in the Spark SQL UI. The value can be 'simple', " + "'extended', 'codegen', 'cost', or 'formatted'. The default value is 'formatted'.") .version("3.1.0") .stringConf .transform(_.toUpperCase(Locale.ROOT)) .checkValue(mode => Set("SIMPLE", "EXTENDED", "CODEGEN", "COST", "FORMATTED").contains(mode), "Invalid value for 'spark.sql.ui.explainMode'. Valid values are 'simple', 'extended', " + "'codegen', 'cost' and 'formatted'.") .createWithDefault("formatted") val SOURCES_BINARY_FILE_MAX_LENGTH = buildConf("spark.sql.sources.binaryFile.maxLength") .doc("The max length of a file that can be read by the binary file data source. " + "Spark will fail fast and not attempt to read the file if its length exceeds this value. " + "The theoretical max is Int.MaxValue, though VMs might implement a smaller max.") .version("3.0.0") .internal() .intConf .createWithDefault(Int.MaxValue) val LEGACY_CAST_DATETIME_TO_STRING = buildConf("spark.sql.legacy.typeCoercion.datetimeToString.enabled") .internal() .doc("If it is set to true, date/timestamp will cast to string in binary comparisons " + s"with String when ${ANSI_ENABLED.key} is false.") .version("3.0.0") .booleanConf .createWithDefault(false) val DEFAULT_CATALOG = buildConf("spark.sql.defaultCatalog") .doc("Name of the default catalog. This will be the current catalog if users have not " + "explicitly set the current catalog yet.") .version("3.0.0") .stringConf .createWithDefault(SESSION_CATALOG_NAME) val V2_SESSION_CATALOG_IMPLEMENTATION = buildConf(s"spark.sql.catalog.$SESSION_CATALOG_NAME") .doc("A catalog implementation that will be used as the v2 interface to Spark's built-in " + s"v1 catalog: $SESSION_CATALOG_NAME. This catalog shares its identifier namespace with " + s"the $SESSION_CATALOG_NAME and must be consistent with it; for example, if a table can " + s"be loaded by the $SESSION_CATALOG_NAME, this catalog must also return the table " + s"metadata. To delegate operations to the $SESSION_CATALOG_NAME, implementations can " + "extend 'CatalogExtension'.") .version("3.0.0") .stringConf .createOptional object MapKeyDedupPolicy extends Enumeration { val EXCEPTION, LAST_WIN = Value } val MAP_KEY_DEDUP_POLICY = buildConf("spark.sql.mapKeyDedupPolicy") .doc("The policy to deduplicate map keys in builtin function: CreateMap, MapFromArrays, " + "MapFromEntries, StringToMap, MapConcat and TransformKeys. When EXCEPTION, the query " + "fails if duplicated map keys are detected. 
When LAST_WIN, the map key that is inserted " + "at last takes precedence.") .version("3.0.0") .stringConf .transform(_.toUpperCase(Locale.ROOT)) .checkValues(MapKeyDedupPolicy.values.map(_.toString)) .createWithDefault(MapKeyDedupPolicy.EXCEPTION.toString) val LEGACY_LOOSE_UPCAST = buildConf("spark.sql.legacy.doLooseUpcast") .internal() .doc("When true, the upcast will be loose and allows string to atomic types.") .version("3.0.0") .booleanConf .createWithDefault(false) object LegacyBehaviorPolicy extends Enumeration { val EXCEPTION, LEGACY, CORRECTED = Value } val LEGACY_CTE_PRECEDENCE_POLICY = buildConf("spark.sql.legacy.ctePrecedencePolicy") .internal() .doc("When LEGACY, outer CTE definitions takes precedence over inner definitions. If set to " + "CORRECTED, inner CTE definitions take precedence. The default value is EXCEPTION, " + "AnalysisException is thrown while name conflict is detected in nested CTE. This config " + "will be removed in future versions and CORRECTED will be the only behavior.") .version("3.0.0") .stringConf .transform(_.toUpperCase(Locale.ROOT)) .checkValues(LegacyBehaviorPolicy.values.map(_.toString)) .createWithDefault(LegacyBehaviorPolicy.EXCEPTION.toString) val LEGACY_TIME_PARSER_POLICY = buildConf("spark.sql.legacy.timeParserPolicy") .internal() .doc("When LEGACY, java.text.SimpleDateFormat is used for formatting and parsing " + "dates/timestamps in a locale-sensitive manner, which is the approach before Spark 3.0. " + "When set to CORRECTED, classes from java.time.* packages are used for the same purpose. " + "The default value is EXCEPTION, RuntimeException is thrown when we will get different " + "results.") .version("3.0.0") .stringConf .transform(_.toUpperCase(Locale.ROOT)) .checkValues(LegacyBehaviorPolicy.values.map(_.toString)) .createWithDefault(LegacyBehaviorPolicy.EXCEPTION.toString) val LEGACY_ARRAY_EXISTS_FOLLOWS_THREE_VALUED_LOGIC = buildConf("spark.sql.legacy.followThreeValuedLogicInArrayExists") .internal() .doc("When true, the ArrayExists will follow the three-valued boolean logic.") .version("3.0.0") .booleanConf .createWithDefault(true) val ADDITIONAL_REMOTE_REPOSITORIES = buildConf("spark.sql.maven.additionalRemoteRepositories") .doc("A comma-delimited string config of the optional additional remote Maven mirror " + "repositories. This is only used for downloading Hive jars in IsolatedClientLoader " + "if the default Maven Central repo is unreachable.") .version("3.0.0") .stringConf .createWithDefault( sys.env.getOrElse("DEFAULT_ARTIFACT_REPOSITORY", "https://maven-central.storage-download.googleapis.com/maven2/")) val LEGACY_FROM_DAYTIME_STRING = buildConf("spark.sql.legacy.fromDayTimeString.enabled") .internal() .doc("When true, the `from` bound is not taken into account in conversion of " + "a day-time string to an interval, and the `to` bound is used to skip " + "all interval units out of the specified range. If it is set to `false`, " + "`ParseException` is thrown if the input does not match to the pattern " + "defined by `from` and `to`.") .version("3.0.0") .booleanConf .createWithDefault(false) val LEGACY_PROPERTY_NON_RESERVED = buildConf("spark.sql.legacy.notReserveProperties") .internal() .doc("When true, all database and table properties are not reserved and available for " + "create/alter syntaxes. 
But please be aware that the reserved properties will be " + "silently removed.") .version("3.0.0") .booleanConf .createWithDefault(false) val LEGACY_ADD_SINGLE_FILE_IN_ADD_FILE = buildConf("spark.sql.legacy.addSingleFileInAddFile") .internal() .doc("When true, only a single file can be added using ADD FILE. If false, then users " + "can add directory by passing directory path to ADD FILE.") .version("3.0.0") .booleanConf .createWithDefault(false) val LEGACY_MSSQLSERVER_NUMERIC_MAPPING_ENABLED = buildConf("spark.sql.legacy.mssqlserver.numericMapping.enabled") .internal() .doc("When true, use legacy MySqlServer SMALLINT and REAL type mapping.") .version("2.4.5") .booleanConf .createWithDefault(false) val CSV_FILTER_PUSHDOWN_ENABLED = buildConf("spark.sql.csv.filterPushdown.enabled") .doc("When true, enable filter pushdown to CSV datasource.") .version("3.0.0") .booleanConf .createWithDefault(true) val JSON_FILTER_PUSHDOWN_ENABLED = buildConf("spark.sql.json.filterPushdown.enabled") .doc("When true, enable filter pushdown to JSON datasource.") .version("3.1.0") .booleanConf .createWithDefault(true) val AVRO_FILTER_PUSHDOWN_ENABLED = buildConf("spark.sql.avro.filterPushdown.enabled") .doc("When true, enable filter pushdown to Avro datasource.") .version("3.1.0") .booleanConf .createWithDefault(true) val ADD_PARTITION_BATCH_SIZE = buildConf("spark.sql.addPartitionInBatch.size") .internal() .doc("The number of partitions to be handled in one turn when use " + "`AlterTableAddPartitionCommand` or `RepairTableCommand` to add partitions into table. " + "The smaller batch size is, the less memory is required for the real handler, e.g. " + "Hive Metastore.") .version("3.0.0") .intConf .checkValue(_ > 0, "The value of spark.sql.addPartitionInBatch.size must be positive") .createWithDefault(100) val LEGACY_ALLOW_HASH_ON_MAPTYPE = buildConf("spark.sql.legacy.allowHashOnMapType") .internal() .doc("When set to true, hash expressions can be applied on elements of MapType. Otherwise, " + "an analysis exception will be thrown.") .version("3.0.0") .booleanConf .createWithDefault(false) val LEGACY_INTEGER_GROUPING_ID = buildConf("spark.sql.legacy.integerGroupingId") .internal() .doc("When true, grouping_id() returns int values instead of long values.") .version("3.1.0") .booleanConf .createWithDefault(false) val PARQUET_INT96_REBASE_MODE_IN_WRITE = buildConf("spark.sql.parquet.int96RebaseModeInWrite") .internal() .doc("When LEGACY, Spark will rebase INT96 timestamps from Proleptic Gregorian calendar to " + "the legacy hybrid (Julian + Gregorian) calendar when writing Parquet files. " + "When CORRECTED, Spark will not do rebase and write the timestamps as it is. " + "When EXCEPTION, which is the default, Spark will fail the writing if it sees ancient " + "timestamps that are ambiguous between the two calendars.") .version("3.1.0") .withAlternative("spark.sql.legacy.parquet.int96RebaseModeInWrite") .stringConf .transform(_.toUpperCase(Locale.ROOT)) .checkValues(LegacyBehaviorPolicy.values.map(_.toString)) .createWithDefault(LegacyBehaviorPolicy.EXCEPTION.toString) val PARQUET_REBASE_MODE_IN_WRITE = buildConf("spark.sql.parquet.datetimeRebaseModeInWrite") .internal() .doc("When LEGACY, Spark will rebase dates/timestamps from Proleptic Gregorian calendar " + "to the legacy hybrid (Julian + Gregorian) calendar when writing Parquet files. " + "When CORRECTED, Spark will not do rebase and write the dates/timestamps as it is. 
" + "When EXCEPTION, which is the default, Spark will fail the writing if it sees " + "ancient dates/timestamps that are ambiguous between the two calendars. " + "This config influences on writes of the following parquet logical types: DATE, " + "TIMESTAMP_MILLIS, TIMESTAMP_MICROS. The INT96 type has the separate config: " + s"${PARQUET_INT96_REBASE_MODE_IN_WRITE.key}.") .version("3.0.0") .withAlternative("spark.sql.legacy.parquet.datetimeRebaseModeInWrite") .stringConf .transform(_.toUpperCase(Locale.ROOT)) .checkValues(LegacyBehaviorPolicy.values.map(_.toString)) .createWithDefault(LegacyBehaviorPolicy.EXCEPTION.toString) val PARQUET_INT96_REBASE_MODE_IN_READ = buildConf("spark.sql.parquet.int96RebaseModeInRead") .internal() .doc("When LEGACY, Spark will rebase INT96 timestamps from the legacy hybrid (Julian + " + "Gregorian) calendar to Proleptic Gregorian calendar when reading Parquet files. " + "When CORRECTED, Spark will not do rebase and read the timestamps as it is. " + "When EXCEPTION, which is the default, Spark will fail the reading if it sees ancient " + "timestamps that are ambiguous between the two calendars. This config is only effective " + "if the writer info (like Spark, Hive) of the Parquet files is unknown.") .version("3.1.0") .withAlternative("spark.sql.legacy.parquet.int96RebaseModeInRead") .stringConf .transform(_.toUpperCase(Locale.ROOT)) .checkValues(LegacyBehaviorPolicy.values.map(_.toString)) .createWithDefault(LegacyBehaviorPolicy.EXCEPTION.toString) val PARQUET_REBASE_MODE_IN_READ = buildConf("spark.sql.parquet.datetimeRebaseModeInRead") .internal() .doc("When LEGACY, Spark will rebase dates/timestamps from the legacy hybrid (Julian + " + "Gregorian) calendar to Proleptic Gregorian calendar when reading Parquet files. " + "When CORRECTED, Spark will not do rebase and read the dates/timestamps as it is. " + "When EXCEPTION, which is the default, Spark will fail the reading if it sees " + "ancient dates/timestamps that are ambiguous between the two calendars. This config is " + "only effective if the writer info (like Spark, Hive) of the Parquet files is unknown. " + "This config influences on reads of the following parquet logical types: DATE, " + "TIMESTAMP_MILLIS, TIMESTAMP_MICROS. The INT96 type has the separate config: " + s"${PARQUET_INT96_REBASE_MODE_IN_READ.key}.") .version("3.0.0") .withAlternative("spark.sql.legacy.parquet.datetimeRebaseModeInRead") .stringConf .transform(_.toUpperCase(Locale.ROOT)) .checkValues(LegacyBehaviorPolicy.values.map(_.toString)) .createWithDefault(LegacyBehaviorPolicy.EXCEPTION.toString) val AVRO_REBASE_MODE_IN_WRITE = buildConf("spark.sql.avro.datetimeRebaseModeInWrite") .internal() .doc("When LEGACY, Spark will rebase dates/timestamps from Proleptic Gregorian calendar " + "to the legacy hybrid (Julian + Gregorian) calendar when writing Avro files. " + "When CORRECTED, Spark will not do rebase and write the dates/timestamps as it is. 
" + "When EXCEPTION, which is the default, Spark will fail the writing if it sees " + "ancient dates/timestamps that are ambiguous between the two calendars.") .version("3.0.0") .withAlternative("spark.sql.legacy.avro.datetimeRebaseModeInWrite") .stringConf .transform(_.toUpperCase(Locale.ROOT)) .checkValues(LegacyBehaviorPolicy.values.map(_.toString)) .createWithDefault(LegacyBehaviorPolicy.EXCEPTION.toString) val AVRO_REBASE_MODE_IN_READ = buildConf("spark.sql.avro.datetimeRebaseModeInRead") .internal() .doc("When LEGACY, Spark will rebase dates/timestamps from the legacy hybrid (Julian + " + "Gregorian) calendar to Proleptic Gregorian calendar when reading Avro files. " + "When CORRECTED, Spark will not do rebase and read the dates/timestamps as it is. " + "When EXCEPTION, which is the default, Spark will fail the reading if it sees " + "ancient dates/timestamps that are ambiguous between the two calendars. This config is " + "only effective if the writer info (like Spark, Hive) of the Avro files is unknown.") .version("3.0.0") .withAlternative("spark.sql.legacy.avro.datetimeRebaseModeInRead") .stringConf .transform(_.toUpperCase(Locale.ROOT)) .checkValues(LegacyBehaviorPolicy.values.map(_.toString)) .createWithDefault(LegacyBehaviorPolicy.EXCEPTION.toString) val SCRIPT_TRANSFORMATION_EXIT_TIMEOUT = buildConf("spark.sql.scriptTransformation.exitTimeoutInSeconds") .internal() .doc("Timeout for executor to wait for the termination of transformation script when EOF.") .version("3.0.0") .timeConf(TimeUnit.SECONDS) .checkValue(_ > 0, "The timeout value must be positive") .createWithDefault(10L) val COALESCE_BUCKETS_IN_JOIN_ENABLED = buildConf("spark.sql.bucketing.coalesceBucketsInJoin.enabled") .doc("When true, if two bucketed tables with the different number of buckets are joined, " + "the side with a bigger number of buckets will be coalesced to have the same number " + "of buckets as the other side. Bigger number of buckets is divisible by the smaller " + "number of buckets. Bucket coalescing is applied to sort-merge joins and " + "shuffled hash join. Note: Coalescing bucketed table can avoid unnecessary shuffling " + "in join, but it also reduces parallelism and could possibly cause OOM for " + "shuffled hash join.") .version("3.1.0") .booleanConf .createWithDefault(false) val COALESCE_BUCKETS_IN_JOIN_MAX_BUCKET_RATIO = buildConf("spark.sql.bucketing.coalesceBucketsInJoin.maxBucketRatio") .doc("The ratio of the number of two buckets being coalesced should be less than or " + "equal to this value for bucket coalescing to be applied. This configuration only " + s"has an effect when '${COALESCE_BUCKETS_IN_JOIN_ENABLED.key}' is set to true.") .version("3.1.0") .intConf .checkValue(_ > 0, "The difference must be positive.") .createWithDefault(4) val BROADCAST_HASH_JOIN_OUTPUT_PARTITIONING_EXPAND_LIMIT = buildConf("spark.sql.execution.broadcastHashJoin.outputPartitioningExpandLimit") .internal() .doc("The maximum number of partitionings that a HashPartitioning can be expanded to. 
" + "This configuration is applicable only for BroadcastHashJoin inner joins and can be " + "set to '0' to disable this feature.") .version("3.1.0") .intConf .checkValue(_ >= 0, "The value must be non-negative.") .createWithDefault(8) val OPTIMIZE_NULL_AWARE_ANTI_JOIN = buildConf("spark.sql.optimizeNullAwareAntiJoin") .internal() .doc("When true, NULL-aware anti join execution will be planed into " + "BroadcastHashJoinExec with flag isNullAwareAntiJoin enabled, " + "optimized from O(M*N) calculation into O(M) calculation " + "using Hash lookup instead of Looping lookup." + "Only support for singleColumn NAAJ for now.") .version("3.1.0") .booleanConf .createWithDefault(true) val LEGACY_COMPLEX_TYPES_TO_STRING = buildConf("spark.sql.legacy.castComplexTypesToString.enabled") .internal() .doc("When true, maps and structs are wrapped by [] in casting to strings, and " + "NULL elements of structs/maps/arrays will be omitted while converting to strings. " + "Otherwise, if this is false, which is the default, maps and structs are wrapped by {}, " + "and NULL elements will be converted to \\"null\\".") .version("3.1.0") .booleanConf .createWithDefault(false) val LEGACY_PATH_OPTION_BEHAVIOR = buildConf("spark.sql.legacy.pathOptionBehavior.enabled") .internal() .doc("When true, \\"path\\" option is overwritten if one path parameter is passed to " + "DataFrameReader.load(), DataFrameWriter.save(), DataStreamReader.load(), or " + "DataStreamWriter.start(). Also, \\"path\\" option is added to the overall paths if " + "multiple path parameters are passed to DataFrameReader.load()") .version("3.1.0") .booleanConf .createWithDefault(false) val LEGACY_EXTRA_OPTIONS_BEHAVIOR = buildConf("spark.sql.legacy.extraOptionsBehavior.enabled") .internal() .doc("When true, the extra options will be ignored for DataFrameReader.table(). If set it " + "to false, which is the default, Spark will check if the extra options have the same " + "key, but the value is different with the table serde properties. If the check passes, " + "the extra options will be merged with the serde properties as the scan options. " + "Otherwise, an exception will be thrown.") .version("3.1.0") .booleanConf .createWithDefault(false) val LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT = buildConf("spark.sql.legacy.createHiveTableByDefault") .internal() .doc("When set to true, CREATE TABLE syntax without USING or STORED AS will use Hive " + s"instead of the value of ${DEFAULT_DATA_SOURCE_NAME.key} as the table provider.") .version("3.1.0") .booleanConf .createWithDefault(true) val LEGACY_CHAR_VARCHAR_AS_STRING = buildConf("spark.sql.legacy.charVarcharAsString") .internal() .doc("When true, Spark treats CHAR/VARCHAR type the same as STRING type, which is the " + "behavior of Spark 3.0 and earlier. This means no length check for CHAR/VARCHAR type and " + "no padding for CHAR type when writing data to the table.") .version("3.1.0") .booleanConf .createWithDefault(false) val CHAR_AS_VARCHAR = buildConf("spark.sql.charAsVarchar") .doc("When true, Spark replaces CHAR type with VARCHAR type in CREATE/REPLACE/ALTER TABLE " + "commands, so that newly created/updated tables will not have CHAR type columns/fields. 
" + "Existing tables with CHAR type columns/fields are not affected by this config.") .version("3.3.0") .booleanConf .createWithDefault(false) val CLI_PRINT_HEADER = buildConf("spark.sql.cli.print.header") .doc("When set to true, spark-sql CLI prints the names of the columns in query output.") .version("3.2.0") .booleanConf .createWithDefault(false) val LEGACY_KEEP_COMMAND_OUTPUT_SCHEMA = buildConf("spark.sql.legacy.keepCommandOutputSchema") .internal() .doc("When true, Spark will keep the output schema of commands such as SHOW DATABASES " + "unchanged.") .version("3.0.2") .booleanConf .createWithDefault(false) val LEGACY_INTERVAL_ENABLED = buildConf("spark.sql.legacy.interval.enabled") .internal() .doc("When set to true, Spark SQL uses the mixed legacy interval type `CalendarIntervalType` " + "instead of the ANSI compliant interval types `YearMonthIntervalType` and " + "`DayTimeIntervalType`. For instance, the date subtraction expression returns " + "`CalendarIntervalType` when the SQL config is set to `true` otherwise an ANSI interval.") .version("3.2.0") .booleanConf .createWithDefault(false) val MAX_CONCURRENT_OUTPUT_FILE_WRITERS = buildConf("spark.sql.maxConcurrentOutputFileWriters") .internal() .doc("Maximum number of output file writers to use concurrently. If number of writers " + "needed reaches this limit, task will sort rest of output then writing them.") .version("3.2.0") .intConf .createWithDefault(0) val INFER_NESTED_DICT_AS_STRUCT = buildConf("spark.sql.pyspark.inferNestedDictAsStruct.enabled") .doc("PySpark's SparkSession.createDataFrame infers the nested dict as a map by default. " + "When it set to true, it infers the nested dict as a struct.") .version("3.3.0") .booleanConf .createWithDefault(false) val LEGACY_USE_V1_COMMAND = buildConf("spark.sql.legacy.useV1Command") .internal() .doc("When true, Spark will use legacy V1 SQL commands.") .version("3.3.0") .booleanConf .createWithDefault(false) /** * Holds information about keys that have been deprecated. * * @param key The deprecated key. * @param version Version of Spark where key was deprecated. * @param comment Additional info regarding to the removed config. For example, * reasons of config deprecation, what users should use instead of it. */ case class DeprecatedConfig(key: String, version: String, comment: String) /** * Maps deprecated SQL config keys to information about the deprecation. * * The extra information is logged as a warning when the SQL config is present * in the user's configuration. */ val deprecatedSQLConfigs: Map[String, DeprecatedConfig] = { val configs = Seq( DeprecatedConfig( PANDAS_GROUPED_MAP_ASSIGN_COLUMNS_BY_NAME.key, "2.4", "The config allows to switch to the behaviour before Spark 2.4 " + "and will be removed in the future releases."), DeprecatedConfig(HIVE_VERIFY_PARTITION_PATH.key, "3.0", s"This config is replaced by '${SPARK_IGNORE_MISSING_FILES.key}'."), DeprecatedConfig(ARROW_EXECUTION_ENABLED.key, "3.0", s"Use '${ARROW_PYSPARK_EXECUTION_ENABLED.key}' instead of it."), DeprecatedConfig(ARROW_FALLBACK_ENABLED.key, "3.0", s"Use '${ARROW_PYSPARK_FALLBACK_ENABLED.key}' instead of it."), DeprecatedConfig(SHUFFLE_TARGET_POSTSHUFFLE_INPUT_SIZE.key, "3.0", s"Use '${ADVISORY_PARTITION_SIZE_IN_BYTES.key}' instead of it."), DeprecatedConfig(OPTIMIZER_METADATA_ONLY.key, "3.0", "Avoid to depend on this optimization to prevent a potential correctness issue. 
" + "If you must use, use 'SparkSessionExtensions' instead to inject it as a custom rule."), DeprecatedConfig(CONVERT_CTAS.key, "3.1", s"Set '${LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT.key}' to false instead."), DeprecatedConfig("spark.sql.sources.schemaStringLengthThreshold", "3.2", s"Use '${HIVE_TABLE_PROPERTY_LENGTH_THRESHOLD.key}' instead."), DeprecatedConfig(PARQUET_INT96_REBASE_MODE_IN_WRITE.alternatives.head, "3.2", s"Use '${PARQUET_INT96_REBASE_MODE_IN_WRITE.key}' instead."), DeprecatedConfig(PARQUET_INT96_REBASE_MODE_IN_READ.alternatives.head, "3.2", s"Use '${PARQUET_INT96_REBASE_MODE_IN_READ.key}' instead."), DeprecatedConfig(PARQUET_REBASE_MODE_IN_WRITE.alternatives.head, "3.2", s"Use '${PARQUET_REBASE_MODE_IN_WRITE.key}' instead."), DeprecatedConfig(PARQUET_REBASE_MODE_IN_READ.alternatives.head, "3.2", s"Use '${PARQUET_REBASE_MODE_IN_READ.key}' instead."), DeprecatedConfig(AVRO_REBASE_MODE_IN_WRITE.alternatives.head, "3.2", s"Use '${AVRO_REBASE_MODE_IN_WRITE.key}' instead."), DeprecatedConfig(AVRO_REBASE_MODE_IN_READ.alternatives.head, "3.2", s"Use '${AVRO_REBASE_MODE_IN_READ.key}' instead."), DeprecatedConfig(LEGACY_REPLACE_DATABRICKS_SPARK_AVRO_ENABLED.key, "3.2", """Use `.format("avro")` in `DataFrameWriter` or `DataFrameReader` instead."""), DeprecatedConfig(COALESCE_PARTITIONS_MIN_PARTITION_NUM.key, "3.2", s"Use '${COALESCE_PARTITIONS_MIN_PARTITION_SIZE.key}' instead.") ) Map(configs.map { cfg => cfg.key -> cfg } : _*) } /** * Holds information about keys that have been removed. * * @param key The removed config key. * @param version Version of Spark where key was removed. * @param defaultValue The default config value. It can be used to notice * users that they set non-default value to an already removed config. * @param comment Additional info regarding to the removed config. */ case class RemovedConfig(key: String, version: String, defaultValue: String, comment: String) /** * The map contains info about removed SQL configs. Keys are SQL config names, * map values contain extra information like the version in which the config was removed, * config's default value and a comment. * * Please, add a removed SQL configuration property here only when it affects behaviours. * For example, `spark.sql.variable.substitute.depth` was not added as it virtually * became no-op later. By this, it makes migrations to new Spark versions painless. */ val removedSQLConfigs: Map[String, RemovedConfig] = { val configs = Seq( RemovedConfig("spark.sql.fromJsonForceNullableSchema", "3.0.0", "true", "It was removed to prevent errors like SPARK-23173 for non-default value."), RemovedConfig( "spark.sql.legacy.allowCreatingManagedTableUsingNonemptyLocation", "3.0.0", "false", "It was removed to prevent loss of user data for non-default value."), RemovedConfig("spark.sql.legacy.compareDateTimestampInTimestamp", "3.0.0", "true", "It was removed to prevent errors like SPARK-23549 for non-default value."), RemovedConfig("spark.sql.parquet.int64AsTimestampMillis", "3.0.0", "false", "The config was deprecated since Spark 2.3." + s"Use '${PARQUET_OUTPUT_TIMESTAMP_TYPE.key}' instead of it."), RemovedConfig("spark.sql.execution.pandas.respectSessionTimeZone", "3.0.0", "true", "The non-default behavior is considered as a bug, see SPARK-22395. 
" + "The config was deprecated since Spark 2.3."), RemovedConfig("spark.sql.optimizer.planChangeLog.level", "3.1.0", "trace", s"Please use `${PLAN_CHANGE_LOG_LEVEL.key}` instead."), RemovedConfig("spark.sql.optimizer.planChangeLog.rules", "3.1.0", "", s"Please use `${PLAN_CHANGE_LOG_RULES.key}` instead."), RemovedConfig("spark.sql.optimizer.planChangeLog.batches", "3.1.0", "", s"Please use `${PLAN_CHANGE_LOG_BATCHES.key}` instead.") ) Map(configs.map { cfg => cfg.key -> cfg } : _*) } } /** * A class that enables the setting and getting of mutable config parameters/hints. * * In the presence of a SQLContext, these can be set and queried by passing SET commands * into Spark SQL's query functions (i.e. sql()). Otherwise, users of this class can * modify the hints by programmatically calling the setters and getters of this class. * * SQLConf is thread-safe (internally synchronized, so safe to be used in multiple threads). */ class SQLConf extends Serializable with Logging { import SQLConf._ /** Only low degree of contention is expected for conf, thus NOT using ConcurrentHashMap. */ @transient protected[spark] val settings = java.util.Collections.synchronizedMap( new java.util.HashMap[String, String]()) @transient protected val reader = new ConfigReader(settings) /** ************************ Spark SQL Params/Hints ******************* */ def analyzerMaxIterations: Int = getConf(ANALYZER_MAX_ITERATIONS) def optimizerExcludedRules: Option[String] = getConf(OPTIMIZER_EXCLUDED_RULES) def optimizerMaxIterations: Int = getConf(OPTIMIZER_MAX_ITERATIONS) def optimizerInSetConversionThreshold: Int = getConf(OPTIMIZER_INSET_CONVERSION_THRESHOLD) def optimizerInSetSwitchThreshold: Int = getConf(OPTIMIZER_INSET_SWITCH_THRESHOLD) def planChangeLogLevel: String = getConf(PLAN_CHANGE_LOG_LEVEL) def planChangeRules: Option[String] = getConf(PLAN_CHANGE_LOG_RULES) def planChangeBatches: Option[String] = getConf(PLAN_CHANGE_LOG_BATCHES) def dynamicPartitionPruningEnabled: Boolean = getConf(DYNAMIC_PARTITION_PRUNING_ENABLED) def dynamicPartitionPruningUseStats: Boolean = getConf(DYNAMIC_PARTITION_PRUNING_USE_STATS) def dynamicPartitionPruningFallbackFilterRatio: Double = getConf(DYNAMIC_PARTITION_PRUNING_FALLBACK_FILTER_RATIO) def dynamicPartitionPruningReuseBroadcastOnly: Boolean = getConf(DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY) def stateStoreProviderClass: String = getConf(STATE_STORE_PROVIDER_CLASS) def isStateSchemaCheckEnabled: Boolean = getConf(STATE_SCHEMA_CHECK_ENABLED) def stateStoreMinDeltasForSnapshot: Int = getConf(STATE_STORE_MIN_DELTAS_FOR_SNAPSHOT) def stateStoreFormatValidationEnabled: Boolean = getConf(STATE_STORE_FORMAT_VALIDATION_ENABLED) def checkpointLocation: Option[String] = getConf(CHECKPOINT_LOCATION) def isUnsupportedOperationCheckEnabled: Boolean = getConf(UNSUPPORTED_OPERATION_CHECK_ENABLED) def useDeprecatedKafkaOffsetFetching: Boolean = getConf(USE_DEPRECATED_KAFKA_OFFSET_FETCHING) def statefulOperatorCorrectnessCheckEnabled: Boolean = getConf(STATEFUL_OPERATOR_CHECK_CORRECTNESS_ENABLED) def fileStreamSinkMetadataIgnored: Boolean = getConf(FILESTREAM_SINK_METADATA_IGNORED) def streamingFileCommitProtocolClass: String = getConf(STREAMING_FILE_COMMIT_PROTOCOL_CLASS) def fileSinkLogDeletion: Boolean = getConf(FILE_SINK_LOG_DELETION) def fileSinkLogCompactInterval: Int = getConf(FILE_SINK_LOG_COMPACT_INTERVAL) def fileSinkLogCleanupDelay: Long = getConf(FILE_SINK_LOG_CLEANUP_DELAY) def fileSourceLogDeletion: Boolean = getConf(FILE_SOURCE_LOG_DELETION) def 
fileSourceLogCompactInterval: Int = getConf(FILE_SOURCE_LOG_COMPACT_INTERVAL) def fileSourceLogCleanupDelay: Long = getConf(FILE_SOURCE_LOG_CLEANUP_DELAY) def streamingSchemaInference: Boolean = getConf(STREAMING_SCHEMA_INFERENCE) def streamingPollingDelay: Long = getConf(STREAMING_POLLING_DELAY) def streamingNoDataProgressEventInterval: Long = getConf(STREAMING_NO_DATA_PROGRESS_EVENT_INTERVAL) def streamingNoDataMicroBatchesEnabled: Boolean = getConf(STREAMING_NO_DATA_MICRO_BATCHES_ENABLED) def streamingMetricsEnabled: Boolean = getConf(STREAMING_METRICS_ENABLED) def streamingProgressRetention: Int = getConf(STREAMING_PROGRESS_RETENTION) def filesMaxPartitionBytes: Long = getConf(FILES_MAX_PARTITION_BYTES) def filesOpenCostInBytes: Long = getConf(FILES_OPEN_COST_IN_BYTES) def filesMinPartitionNum: Option[Int] = getConf(FILES_MIN_PARTITION_NUM) def ignoreCorruptFiles: Boolean = getConf(IGNORE_CORRUPT_FILES) def ignoreMissingFiles: Boolean = getConf(IGNORE_MISSING_FILES) def maxRecordsPerFile: Long = getConf(MAX_RECORDS_PER_FILE) def useCompression: Boolean = getConf(COMPRESS_CACHED) def orcCompressionCodec: String = getConf(ORC_COMPRESSION) def orcVectorizedReaderEnabled: Boolean = getConf(ORC_VECTORIZED_READER_ENABLED) def orcVectorizedReaderBatchSize: Int = getConf(ORC_VECTORIZED_READER_BATCH_SIZE) def orcVectorizedReaderNestedColumnEnabled: Boolean = getConf(ORC_VECTORIZED_READER_NESTED_COLUMN_ENABLED) def parquetCompressionCodec: String = getConf(PARQUET_COMPRESSION) def parquetVectorizedReaderEnabled: Boolean = getConf(PARQUET_VECTORIZED_READER_ENABLED) def parquetVectorizedReaderBatchSize: Int = getConf(PARQUET_VECTORIZED_READER_BATCH_SIZE) def columnBatchSize: Int = getConf(COLUMN_BATCH_SIZE) def cacheVectorizedReaderEnabled: Boolean = getConf(CACHE_VECTORIZED_READER_ENABLED) def defaultNumShufflePartitions: Int = getConf(SHUFFLE_PARTITIONS) def numShufflePartitions: Int = { if (adaptiveExecutionEnabled && coalesceShufflePartitionsEnabled) { getConf(COALESCE_PARTITIONS_INITIAL_PARTITION_NUM).getOrElse(defaultNumShufflePartitions) } else { defaultNumShufflePartitions } } def adaptiveExecutionEnabled: Boolean = getConf(ADAPTIVE_EXECUTION_ENABLED) def adaptiveExecutionLogLevel: String = getConf(ADAPTIVE_EXECUTION_LOG_LEVEL) def fetchShuffleBlocksInBatch: Boolean = getConf(FETCH_SHUFFLE_BLOCKS_IN_BATCH) def nonEmptyPartitionRatioForBroadcastJoin: Double = getConf(NON_EMPTY_PARTITION_RATIO_FOR_BROADCAST_JOIN) def coalesceShufflePartitionsEnabled: Boolean = getConf(COALESCE_PARTITIONS_ENABLED) def minBatchesToRetain: Int = getConf(MIN_BATCHES_TO_RETAIN) def maxBatchesToRetainInMemory: Int = getConf(MAX_BATCHES_TO_RETAIN_IN_MEMORY) def streamingMaintenanceInterval: Long = getConf(STREAMING_MAINTENANCE_INTERVAL) def stateStoreCompressionCodec: String = getConf(STATE_STORE_COMPRESSION_CODEC) def parquetFilterPushDown: Boolean = getConf(PARQUET_FILTER_PUSHDOWN_ENABLED) def parquetFilterPushDownDate: Boolean = getConf(PARQUET_FILTER_PUSHDOWN_DATE_ENABLED) def parquetFilterPushDownTimestamp: Boolean = getConf(PARQUET_FILTER_PUSHDOWN_TIMESTAMP_ENABLED) def parquetFilterPushDownDecimal: Boolean = getConf(PARQUET_FILTER_PUSHDOWN_DECIMAL_ENABLED) def parquetFilterPushDownStringStartWith: Boolean = getConf(PARQUET_FILTER_PUSHDOWN_STRING_STARTSWITH_ENABLED) def parquetFilterPushDownInFilterThreshold: Int = getConf(PARQUET_FILTER_PUSHDOWN_INFILTERTHRESHOLD) def parquetAggregatePushDown: Boolean = getConf(PARQUET_AGGREGATE_PUSHDOWN_ENABLED) def orcFilterPushDown: Boolean = 
getConf(ORC_FILTER_PUSHDOWN_ENABLED) def orcAggregatePushDown: Boolean = getConf(ORC_AGGREGATE_PUSHDOWN_ENABLED) def isOrcSchemaMergingEnabled: Boolean = getConf(ORC_SCHEMA_MERGING_ENABLED) def verifyPartitionPath: Boolean = getConf(HIVE_VERIFY_PARTITION_PATH) def metastorePartitionPruning: Boolean = getConf(HIVE_METASTORE_PARTITION_PRUNING) def metastorePartitionPruningInSetThreshold: Int = getConf(HIVE_METASTORE_PARTITION_PRUNING_INSET_THRESHOLD) def metastorePartitionPruningFallbackOnException: Boolean = getConf(HIVE_METASTORE_PARTITION_PRUNING_FALLBACK_ON_EXCEPTION) def metastorePartitionPruningFastFallback: Boolean = getConf(HIVE_METASTORE_PARTITION_PRUNING_FAST_FALLBACK) def manageFilesourcePartitions: Boolean = getConf(HIVE_MANAGE_FILESOURCE_PARTITIONS) def filesourcePartitionFileCacheSize: Long = getConf(HIVE_FILESOURCE_PARTITION_FILE_CACHE_SIZE) def caseSensitiveInferenceMode: HiveCaseSensitiveInferenceMode.Value = HiveCaseSensitiveInferenceMode.withName(getConf(HIVE_CASE_SENSITIVE_INFERENCE)) def gatherFastStats: Boolean = getConf(GATHER_FASTSTAT) def optimizerMetadataOnly: Boolean = getConf(OPTIMIZER_METADATA_ONLY) def wholeStageEnabled: Boolean = getConf(WHOLESTAGE_CODEGEN_ENABLED) def wholeStageUseIdInClassName: Boolean = getConf(WHOLESTAGE_CODEGEN_USE_ID_IN_CLASS_NAME) def wholeStageMaxNumFields: Int = getConf(WHOLESTAGE_MAX_NUM_FIELDS) def codegenFallback: Boolean = getConf(CODEGEN_FALLBACK) def codegenComments: Boolean = getConf(StaticSQLConf.CODEGEN_COMMENTS) def loggingMaxLinesForCodegen: Int = getConf(CODEGEN_LOGGING_MAX_LINES) def hugeMethodLimit: Int = getConf(WHOLESTAGE_HUGE_METHOD_LIMIT) def methodSplitThreshold: Int = getConf(CODEGEN_METHOD_SPLIT_THRESHOLD) def wholeStageSplitConsumeFuncByOperator: Boolean = getConf(WHOLESTAGE_SPLIT_CONSUME_FUNC_BY_OPERATOR) def tableRelationCacheSize: Int = getConf(StaticSQLConf.FILESOURCE_TABLE_RELATION_CACHE_SIZE) def codegenCacheMaxEntries: Int = getConf(StaticSQLConf.CODEGEN_CACHE_MAX_ENTRIES) def exchangeReuseEnabled: Boolean = getConf(EXCHANGE_REUSE_ENABLED) def subqueryReuseEnabled: Boolean = getConf(SUBQUERY_REUSE_ENABLED) def caseSensitiveAnalysis: Boolean = getConf(SQLConf.CASE_SENSITIVE) def constraintPropagationEnabled: Boolean = getConf(CONSTRAINT_PROPAGATION_ENABLED) def escapedStringLiterals: Boolean = getConf(ESCAPED_STRING_LITERALS) def fileCompressionFactor: Double = getConf(FILE_COMPRESSION_FACTOR) def stringRedactionPattern: Option[Regex] = getConf(SQL_STRING_REDACTION_PATTERN) def sortBeforeRepartition: Boolean = getConf(SORT_BEFORE_REPARTITION) def topKSortFallbackThreshold: Int = getConf(TOP_K_SORT_FALLBACK_THRESHOLD) def fastHashAggregateRowMaxCapacityBit: Int = getConf(FAST_HASH_AGGREGATE_MAX_ROWS_CAPACITY_BIT) def streamingSessionWindowMergeSessionInLocalPartition: Boolean = getConf(STREAMING_SESSION_WINDOW_MERGE_SESSIONS_IN_LOCAL_PARTITION) def datetimeJava8ApiEnabled: Boolean = getConf(DATETIME_JAVA8API_ENABLED) def uiExplainMode: String = getConf(UI_EXPLAIN_MODE) def addSingleFileInAddFile: Boolean = getConf(LEGACY_ADD_SINGLE_FILE_IN_ADD_FILE) def legacyMsSqlServerNumericMappingEnabled: Boolean = getConf(LEGACY_MSSQLSERVER_NUMERIC_MAPPING_ENABLED) def legacyTimeParserPolicy: LegacyBehaviorPolicy.Value = { LegacyBehaviorPolicy.withName(getConf(SQLConf.LEGACY_TIME_PARSER_POLICY)) } def broadcastHashJoinOutputPartitioningExpandLimit: Int = getConf(BROADCAST_HASH_JOIN_OUTPUT_PARTITIONING_EXPAND_LIMIT) /** * Returns the [[Resolver]] for the current configuration, which can be used to determine if two * 
identifiers are equal. */ def resolver: Resolver = { if (caseSensitiveAnalysis) { org.apache.spark.sql.catalyst.analysis.caseSensitiveResolution } else { org.apache.spark.sql.catalyst.analysis.caseInsensitiveResolution } } /** * Returns the error handler for handling hint errors. */ def hintErrorHandler: HintErrorHandler = HintErrorLogger def subexpressionEliminationEnabled: Boolean = getConf(SUBEXPRESSION_ELIMINATION_ENABLED) def subexpressionEliminationCacheMaxEntries: Int = getConf(SUBEXPRESSION_ELIMINATION_CACHE_MAX_ENTRIES) def autoBroadcastJoinThreshold: Long = getConf(AUTO_BROADCASTJOIN_THRESHOLD) def limitScaleUpFactor: Int = getConf(LIMIT_SCALE_UP_FACTOR) def advancedPartitionPredicatePushdownEnabled: Boolean = getConf(ADVANCED_PARTITION_PREDICATE_PUSHDOWN) def preferSortMergeJoin: Boolean = getConf(PREFER_SORTMERGEJOIN) def enableRadixSort: Boolean = getConf(RADIX_SORT_ENABLED) def isParquetSchemaMergingEnabled: Boolean = getConf(PARQUET_SCHEMA_MERGING_ENABLED) def isParquetSchemaRespectSummaries: Boolean = getConf(PARQUET_SCHEMA_RESPECT_SUMMARIES) def parquetOutputCommitterClass: String = getConf(PARQUET_OUTPUT_COMMITTER_CLASS) def isParquetBinaryAsString: Boolean = getConf(PARQUET_BINARY_AS_STRING) def isParquetINT96AsTimestamp: Boolean = getConf(PARQUET_INT96_AS_TIMESTAMP) def isParquetINT96TimestampConversion: Boolean = getConf(PARQUET_INT96_TIMESTAMP_CONVERSION) def parquetOutputTimestampType: ParquetOutputTimestampType.Value = { ParquetOutputTimestampType.withName(getConf(PARQUET_OUTPUT_TIMESTAMP_TYPE)) } def writeLegacyParquetFormat: Boolean = getConf(PARQUET_WRITE_LEGACY_FORMAT) def parquetRecordFilterEnabled: Boolean = getConf(PARQUET_RECORD_FILTER_ENABLED) def inMemoryPartitionPruning: Boolean = getConf(IN_MEMORY_PARTITION_PRUNING) def inMemoryTableScanStatisticsEnabled: Boolean = getConf(IN_MEMORY_TABLE_SCAN_STATISTICS_ENABLED) def offHeapColumnVectorEnabled: Boolean = getConf(COLUMN_VECTOR_OFFHEAP_ENABLED) def columnNameOfCorruptRecord: String = getConf(COLUMN_NAME_OF_CORRUPT_RECORD) def broadcastTimeout: Long = { val timeoutValue = getConf(BROADCAST_TIMEOUT) if (timeoutValue < 0) Long.MaxValue else timeoutValue } def defaultDataSourceName: String = getConf(DEFAULT_DATA_SOURCE_NAME) def convertCTAS: Boolean = getConf(CONVERT_CTAS) def partitionColumnTypeInferenceEnabled: Boolean = getConf(SQLConf.PARTITION_COLUMN_TYPE_INFERENCE) def fileCommitProtocolClass: String = getConf(SQLConf.FILE_COMMIT_PROTOCOL_CLASS) def parallelPartitionDiscoveryThreshold: Int = getConf(SQLConf.PARALLEL_PARTITION_DISCOVERY_THRESHOLD) def parallelPartitionDiscoveryParallelism: Int = getConf(SQLConf.PARALLEL_PARTITION_DISCOVERY_PARALLELISM) def bucketingEnabled: Boolean = getConf(SQLConf.BUCKETING_ENABLED) def bucketingMaxBuckets: Int = getConf(SQLConf.BUCKETING_MAX_BUCKETS) def autoBucketedScanEnabled: Boolean = getConf(SQLConf.AUTO_BUCKETED_SCAN_ENABLED) def dataFrameSelfJoinAutoResolveAmbiguity: Boolean = getConf(DATAFRAME_SELF_JOIN_AUTO_RESOLVE_AMBIGUITY) def dataFrameRetainGroupColumns: Boolean = getConf(DATAFRAME_RETAIN_GROUP_COLUMNS) def dataFramePivotMaxValues: Int = getConf(DATAFRAME_PIVOT_MAX_VALUES) def runSQLonFile: Boolean = getConf(RUN_SQL_ON_FILES) def enableTwoLevelAggMap: Boolean = getConf(ENABLE_TWOLEVEL_AGG_MAP) def enableVectorizedHashMap: Boolean = getConf(ENABLE_VECTORIZED_HASH_MAP) def useObjectHashAggregation: Boolean = getConf(USE_OBJECT_HASH_AGG) def objectAggSortBasedFallbackThreshold: Int = getConf(OBJECT_AGG_SORT_BASED_FALLBACK_THRESHOLD) def 
variableSubstituteEnabled: Boolean = getConf(VARIABLE_SUBSTITUTE_ENABLED) def warehousePath: String = new Path(getConf(StaticSQLConf.WAREHOUSE_PATH)).toString def hiveThriftServerSingleSession: Boolean = getConf(StaticSQLConf.HIVE_THRIFT_SERVER_SINGLESESSION) def orderByOrdinal: Boolean = getConf(ORDER_BY_ORDINAL) def groupByOrdinal: Boolean = getConf(GROUP_BY_ORDINAL) def groupByAliases: Boolean = getConf(GROUP_BY_ALIASES) def crossJoinEnabled: Boolean = getConf(SQLConf.CROSS_JOINS_ENABLED) def sessionLocalTimeZone: String = getConf(SQLConf.SESSION_LOCAL_TIMEZONE) def jsonGeneratorIgnoreNullFields: Boolean = getConf(SQLConf.JSON_GENERATOR_IGNORE_NULL_FIELDS) def jsonExpressionOptimization: Boolean = getConf(SQLConf.JSON_EXPRESSION_OPTIMIZATION) def csvExpressionOptimization: Boolean = getConf(SQLConf.CSV_EXPRESSION_OPTIMIZATION) def parallelFileListingInStatsComputation: Boolean = getConf(SQLConf.PARALLEL_FILE_LISTING_IN_STATS_COMPUTATION) def fallBackToHdfsForStatsEnabled: Boolean = getConf(ENABLE_FALL_BACK_TO_HDFS_FOR_STATS) def defaultSizeInBytes: Long = getConf(DEFAULT_SIZE_IN_BYTES) def ndvMaxError: Double = getConf(NDV_MAX_ERROR) def histogramEnabled: Boolean = getConf(HISTOGRAM_ENABLED) def histogramNumBins: Int = getConf(HISTOGRAM_NUM_BINS) def percentileAccuracy: Int = getConf(PERCENTILE_ACCURACY) def cboEnabled: Boolean = getConf(SQLConf.CBO_ENABLED) def planStatsEnabled: Boolean = getConf(SQLConf.PLAN_STATS_ENABLED) def autoSizeUpdateEnabled: Boolean = getConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED) def joinReorderEnabled: Boolean = getConf(SQLConf.JOIN_REORDER_ENABLED) def joinReorderDPThreshold: Int = getConf(SQLConf.JOIN_REORDER_DP_THRESHOLD) def joinReorderCardWeight: Double = getConf(SQLConf.JOIN_REORDER_CARD_WEIGHT) def joinReorderDPStarFilter: Boolean = getConf(SQLConf.JOIN_REORDER_DP_STAR_FILTER) def windowExecBufferInMemoryThreshold: Int = getConf(WINDOW_EXEC_BUFFER_IN_MEMORY_THRESHOLD) def windowExecBufferSpillThreshold: Int = getConf(WINDOW_EXEC_BUFFER_SPILL_THRESHOLD) def sessionWindowBufferInMemoryThreshold: Int = getConf(SESSION_WINDOW_BUFFER_IN_MEMORY_THRESHOLD) def sessionWindowBufferSpillThreshold: Int = getConf(SESSION_WINDOW_BUFFER_SPILL_THRESHOLD) def sortMergeJoinExecBufferInMemoryThreshold: Int = getConf(SORT_MERGE_JOIN_EXEC_BUFFER_IN_MEMORY_THRESHOLD) def sortMergeJoinExecBufferSpillThreshold: Int = getConf(SORT_MERGE_JOIN_EXEC_BUFFER_SPILL_THRESHOLD) def cartesianProductExecBufferInMemoryThreshold: Int = getConf(CARTESIAN_PRODUCT_EXEC_BUFFER_IN_MEMORY_THRESHOLD) def cartesianProductExecBufferSpillThreshold: Int = getConf(CARTESIAN_PRODUCT_EXEC_BUFFER_SPILL_THRESHOLD) def codegenSplitAggregateFunc: Boolean = getConf(SQLConf.CODEGEN_SPLIT_AGGREGATE_FUNC) def maxNestedViewDepth: Int = getConf(SQLConf.MAX_NESTED_VIEW_DEPTH) def useCurrentSQLConfigsForView: Boolean = getConf(SQLConf.USE_CURRENT_SQL_CONFIGS_FOR_VIEW) def storeAnalyzedPlanForView: Boolean = getConf(SQLConf.STORE_ANALYZED_PLAN_FOR_VIEW) def allowAutoGeneratedAliasForView: Boolean = getConf(SQLConf.ALLOW_AUTO_GENERATED_ALIAS_FOR_VEW) def allowStarWithSingleTableIdentifierInCount: Boolean = getConf(SQLConf.ALLOW_STAR_WITH_SINGLE_TABLE_IDENTIFIER_IN_COUNT) def allowNonEmptyLocationInCTAS: Boolean = getConf(SQLConf.ALLOW_NON_EMPTY_LOCATION_IN_CTAS) def starSchemaDetection: Boolean = getConf(STARSCHEMA_DETECTION) def starSchemaFTRatio: Double = getConf(STARSCHEMA_FACT_TABLE_RATIO) def supportQuotedRegexColumnName: Boolean = getConf(SUPPORT_QUOTED_REGEX_COLUMN_NAME) def rangeExchangeSampleSizePerPartition: 
Int = getConf(RANGE_EXCHANGE_SAMPLE_SIZE_PER_PARTITION) def arrowPySparkEnabled: Boolean = getConf(ARROW_PYSPARK_EXECUTION_ENABLED) def arrowPySparkSelfDestructEnabled: Boolean = getConf(ARROW_PYSPARK_SELF_DESTRUCT_ENABLED) def pysparkJVMStacktraceEnabled: Boolean = getConf(PYSPARK_JVM_STACKTRACE_ENABLED) def arrowSparkREnabled: Boolean = getConf(ARROW_SPARKR_EXECUTION_ENABLED) def arrowPySparkFallbackEnabled: Boolean = getConf(ARROW_PYSPARK_FALLBACK_ENABLED) def arrowMaxRecordsPerBatch: Int = getConf(ARROW_EXECUTION_MAX_RECORDS_PER_BATCH) def pandasUDFBufferSize: Int = getConf(PANDAS_UDF_BUFFER_SIZE) def pysparkSimplifiedTraceback: Boolean = getConf(PYSPARK_SIMPLIFIEID_TRACEBACK) def pandasGroupedMapAssignColumnsByName: Boolean = getConf(SQLConf.PANDAS_GROUPED_MAP_ASSIGN_COLUMNS_BY_NAME) def arrowSafeTypeConversion: Boolean = getConf(SQLConf.PANDAS_ARROW_SAFE_TYPE_CONVERSION) def replaceExceptWithFilter: Boolean = getConf(REPLACE_EXCEPT_WITH_FILTER) def decimalOperationsAllowPrecisionLoss: Boolean = getConf(DECIMAL_OPERATIONS_ALLOW_PREC_LOSS) def literalPickMinimumPrecision: Boolean = getConf(LITERAL_PICK_MINIMUM_PRECISION) def continuousStreamingEpochBacklogQueueSize: Int = getConf(CONTINUOUS_STREAMING_EPOCH_BACKLOG_QUEUE_SIZE) def continuousStreamingExecutorQueueSize: Int = getConf(CONTINUOUS_STREAMING_EXECUTOR_QUEUE_SIZE) def continuousStreamingExecutorPollIntervalMs: Long = getConf(CONTINUOUS_STREAMING_EXECUTOR_POLL_INTERVAL_MS) def disabledV2StreamingWriters: String = getConf(DISABLED_V2_STREAMING_WRITERS) def disabledV2StreamingMicroBatchReaders: String = getConf(DISABLED_V2_STREAMING_MICROBATCH_READERS) def fastFailFileFormatOutput: Boolean = getConf(FASTFAIL_ON_FILEFORMAT_OUTPUT) def concatBinaryAsString: Boolean = getConf(CONCAT_BINARY_AS_STRING) def eltOutputAsString: Boolean = getConf(ELT_OUTPUT_AS_STRING) def validatePartitionColumns: Boolean = getConf(VALIDATE_PARTITION_COLUMNS) def partitionOverwriteMode: PartitionOverwriteMode.Value = PartitionOverwriteMode.withName(getConf(PARTITION_OVERWRITE_MODE)) def storeAssignmentPolicy: StoreAssignmentPolicy.Value = StoreAssignmentPolicy.withName(getConf(STORE_ASSIGNMENT_POLICY)) def ansiEnabled: Boolean = getConf(ANSI_ENABLED) def enforceReservedKeywords: Boolean = ansiEnabled && getConf(ENFORCE_RESERVED_KEYWORDS) def strictIndexOperator: Boolean = ansiEnabled && getConf(ANSI_STRICT_INDEX_OPERATOR) def timestampType: AtomicType = getConf(TIMESTAMP_TYPE) match { case "TIMESTAMP_LTZ" => // For historical reason, the TimestampType maps to TIMESTAMP WITH LOCAL TIME ZONE TimestampType case "TIMESTAMP_NTZ" => TimestampNTZType } def nestedSchemaPruningEnabled: Boolean = getConf(NESTED_SCHEMA_PRUNING_ENABLED) def serializerNestedSchemaPruningEnabled: Boolean = getConf(SERIALIZER_NESTED_SCHEMA_PRUNING_ENABLED) def nestedPruningOnExpressions: Boolean = getConf(NESTED_PRUNING_ON_EXPRESSIONS) def csvColumnPruning: Boolean = getConf(SQLConf.CSV_PARSER_COLUMN_PRUNING) def legacySizeOfNull: Boolean = { // size(null) should return null under ansi mode. 
getConf(SQLConf.LEGACY_SIZE_OF_NULL) && !getConf(ANSI_ENABLED) } def isReplEagerEvalEnabled: Boolean = getConf(SQLConf.REPL_EAGER_EVAL_ENABLED) def replEagerEvalMaxNumRows: Int = getConf(SQLConf.REPL_EAGER_EVAL_MAX_NUM_ROWS) def replEagerEvalTruncate: Int = getConf(SQLConf.REPL_EAGER_EVAL_TRUNCATE) def avroCompressionCodec: String = getConf(SQLConf.AVRO_COMPRESSION_CODEC) def avroDeflateLevel: Int = getConf(SQLConf.AVRO_DEFLATE_LEVEL) def replaceDatabricksSparkAvroEnabled: Boolean = getConf(SQLConf.LEGACY_REPLACE_DATABRICKS_SPARK_AVRO_ENABLED) def setOpsPrecedenceEnforced: Boolean = getConf(SQLConf.LEGACY_SETOPS_PRECEDENCE_ENABLED) def exponentLiteralAsDecimalEnabled: Boolean = getConf(SQLConf.LEGACY_EXPONENT_LITERAL_AS_DECIMAL_ENABLED) def allowNegativeScaleOfDecimalEnabled: Boolean = getConf(SQLConf.LEGACY_ALLOW_NEGATIVE_SCALE_OF_DECIMAL_ENABLED) def legacyStatisticalAggregate: Boolean = getConf(SQLConf.LEGACY_STATISTICAL_AGGREGATE) def truncateTableIgnorePermissionAcl: Boolean = getConf(SQLConf.TRUNCATE_TABLE_IGNORE_PERMISSION_ACL) def nameNonStructGroupingKeyAsValue: Boolean = getConf(SQLConf.NAME_NON_STRUCT_GROUPING_KEY_AS_VALUE) def maxToStringFields: Int = getConf(SQLConf.MAX_TO_STRING_FIELDS) def maxPlanStringLength: Int = getConf(SQLConf.MAX_PLAN_STRING_LENGTH).toInt def maxMetadataStringLength: Int = getConf(SQLConf.MAX_METADATA_STRING_LENGTH) def setCommandRejectsSparkCoreConfs: Boolean = getConf(SQLConf.SET_COMMAND_REJECTS_SPARK_CORE_CONFS) def castDatetimeToString: Boolean = getConf(SQLConf.LEGACY_CAST_DATETIME_TO_STRING) def ignoreDataLocality: Boolean = getConf(SQLConf.IGNORE_DATA_LOCALITY) def csvFilterPushDown: Boolean = getConf(CSV_FILTER_PUSHDOWN_ENABLED) def jsonFilterPushDown: Boolean = getConf(JSON_FILTER_PUSHDOWN_ENABLED) def avroFilterPushDown: Boolean = getConf(AVRO_FILTER_PUSHDOWN_ENABLED) def integerGroupingIdEnabled: Boolean = getConf(SQLConf.LEGACY_INTEGER_GROUPING_ID) def metadataCacheTTL: Long = getConf(StaticSQLConf.METADATA_CACHE_TTL_SECONDS) def coalesceBucketsInJoinEnabled: Boolean = getConf(SQLConf.COALESCE_BUCKETS_IN_JOIN_ENABLED) def coalesceBucketsInJoinMaxBucketRatio: Int = getConf(SQLConf.COALESCE_BUCKETS_IN_JOIN_MAX_BUCKET_RATIO) def optimizeNullAwareAntiJoin: Boolean = getConf(SQLConf.OPTIMIZE_NULL_AWARE_ANTI_JOIN) def legacyPathOptionBehavior: Boolean = getConf(SQLConf.LEGACY_PATH_OPTION_BEHAVIOR) def disabledJdbcConnectionProviders: String = getConf( StaticSQLConf.DISABLED_JDBC_CONN_PROVIDER_LIST) def charVarcharAsString: Boolean = getConf(SQLConf.LEGACY_CHAR_VARCHAR_AS_STRING) def cliPrintHeader: Boolean = getConf(SQLConf.CLI_PRINT_HEADER) def legacyIntervalEnabled: Boolean = getConf(LEGACY_INTERVAL_ENABLED) def decorrelateInnerQueryEnabled: Boolean = getConf(SQLConf.DECORRELATE_INNER_QUERY_ENABLED) def maxConcurrentOutputFileWriters: Int = getConf(SQLConf.MAX_CONCURRENT_OUTPUT_FILE_WRITERS) def inferDictAsStruct: Boolean = getConf(SQLConf.INFER_NESTED_DICT_AS_STRUCT) def parquetFieldIdReadEnabled: Boolean = getConf(SQLConf.PARQUET_FIELD_ID_READ_ENABLED) def parquetFieldIdWriteEnabled: Boolean = getConf(SQLConf.PARQUET_FIELD_ID_WRITE_ENABLED) def ignoreMissingParquetFieldId: Boolean = getConf(SQLConf.IGNORE_MISSING_PARQUET_FIELD_ID) def useV1Command: Boolean = getConf(SQLConf.LEGACY_USE_V1_COMMAND) /** ********************** SQLConf functionality methods ************ */ /** Set Spark SQL configuration properties. 
*/ def setConf(props: Properties): Unit = settings.synchronized { props.asScala.foreach { case (k, v) => setConfString(k, v) } } /** Set the given Spark SQL configuration property using a `string` value. */ def setConfString(key: String, value: String): Unit = { require(key != null, "key cannot be null") require(value != null, s"value cannot be null for key: $key") val entry = getConfigEntry(key) if (entry != null) { // Only verify configs in the SQLConf object entry.valueConverter(value) } setConfWithCheck(key, value) } /** Set the given Spark SQL configuration property. */ def setConf[T](entry: ConfigEntry[T], value: T): Unit = { require(entry != null, "entry cannot be null") require(value != null, s"value cannot be null for key: ${entry.key}") require(containsConfigEntry(entry), s"$entry is not registered") setConfWithCheck(entry.key, entry.stringConverter(value)) } /** Return the value of Spark SQL configuration property for the given key. */ @throws[NoSuchElementException]("if key is not set") def getConfString(key: String): String = { Option(settings.get(key)). orElse { // Try to use the default value Option(getConfigEntry(key)).map { e => e.stringConverter(e.readFrom(reader)) } }. getOrElse(throw QueryExecutionErrors.noSuchElementExceptionError(key)) } /** * Return the value of Spark SQL configuration property for the given key. If the key is not set * yet, return `defaultValue`. This is useful when `defaultValue` in ConfigEntry is not the * desired one. */ def getConf[T](entry: ConfigEntry[T], defaultValue: T): T = { require(containsConfigEntry(entry), s"$entry is not registered") Option(settings.get(entry.key)).map(entry.valueConverter).getOrElse(defaultValue) } /** * Return the value of Spark SQL configuration property for the given key. If the key is not set * yet, return `defaultValue` in [[ConfigEntry]]. */ def getConf[T](entry: ConfigEntry[T]): T = { require(containsConfigEntry(entry), s"$entry is not registered") entry.readFrom(reader) } /** * Return the value of an optional Spark SQL configuration property for the given key. If the key * is not set yet, returns None. */ def getConf[T](entry: OptionalConfigEntry[T]): Option[T] = { require(containsConfigEntry(entry), s"$entry is not registered") entry.readFrom(reader) } /** * Return the `string` value of Spark SQL configuration property for the given key. If the key is * not set yet, return `defaultValue`. */ def getConfString(key: String, defaultValue: String): String = { Option(settings.get(key)).getOrElse { // If the key is not set, need to check whether the config entry is registered and is // a fallback conf, so that we can check its parent. 
getConfigEntry(key) match { case e: FallbackConfigEntry[_] => getConfString(e.fallback.key, defaultValue) case e: ConfigEntry[_] if defaultValue != null && defaultValue != ConfigEntry.UNDEFINED => // Only verify configs in the SQLConf object e.valueConverter(defaultValue) defaultValue case _ => defaultValue } } } private var definedConfsLoaded = false /** * Init [[StaticSQLConf]] and [[org.apache.spark.sql.hive.HiveUtils]] so that all the defined * SQL Configurations will be registered to SQLConf */ private def loadDefinedConfs(): Unit = { if (!definedConfsLoaded) { definedConfsLoaded = true // Force to register static SQL configurations StaticSQLConf try { // Force to register SQL configurations from Hive module val symbol = ScalaReflection.mirror.staticModule("org.apache.spark.sql.hive.HiveUtils") ScalaReflection.mirror.reflectModule(symbol).instance } catch { case NonFatal(e) => logWarning("SQL configurations from Hive module is not loaded", e) } } } /** * Return all the configuration properties that have been set (i.e. not the default). * This creates a new copy of the config properties in the form of a Map. */ def getAllConfs: immutable.Map[String, String] = settings.synchronized { settings.asScala.toMap } /** * Return all the configuration definitions that have been defined in [[SQLConf]]. Each * definition contains key, defaultValue and doc. */ def getAllDefinedConfs: Seq[(String, String, String, String)] = { loadDefinedConfs() getConfigEntries().asScala.filter(_.isPublic).map { entry => val displayValue = Option(getConfString(entry.key, null)).getOrElse(entry.defaultValueString) (entry.key, displayValue, entry.doc, entry.version) }.toSeq } /** * Redacts the given option map according to the description of SQL_OPTIONS_REDACTION_PATTERN. */ def redactOptions[K, V](options: Map[K, V]): Map[K, V] = { redactOptions(options.toSeq).toMap } /** * Redacts the given option map according to the description of SQL_OPTIONS_REDACTION_PATTERN. */ def redactOptions[K, V](options: Seq[(K, V)]): Seq[(K, V)] = { val regexes = Seq( getConf(SQL_OPTIONS_REDACTION_PATTERN), SECRET_REDACTION_PATTERN.readFrom(reader)) regexes.foldLeft(options) { case (opts, r) => Utils.redact(Some(r), opts) } } /** * Return whether a given key is set in this [[SQLConf]]. */ def contains(key: String): Boolean = { settings.containsKey(key) } /** * Logs a warning message if the given config key is deprecated. */ private def logDeprecationWarning(key: String): Unit = { SQLConf.deprecatedSQLConfigs.get(key).foreach { case DeprecatedConfig(configName, version, comment) => logWarning( s"The SQL config '$configName' has been deprecated in Spark v$version " + s"and may be removed in the future. 
$comment") } } private def requireDefaultValueOfRemovedConf(key: String, value: String): Unit = { SQLConf.removedSQLConfigs.get(key).foreach { case RemovedConfig(configName, version, defaultValue, comment) => if (value != defaultValue) { throw QueryCompilationErrors.configRemovedInVersionError(configName, version, comment) } } } protected def setConfWithCheck(key: String, value: String): Unit = { logDeprecationWarning(key) requireDefaultValueOfRemovedConf(key, value) settings.put(key, value) } def unsetConf(key: String): Unit = { logDeprecationWarning(key) settings.remove(key) } def unsetConf(entry: ConfigEntry[_]): Unit = { unsetConf(entry.key) } def clear(): Unit = { settings.clear() } override def clone(): SQLConf = { val result = new SQLConf getAllConfs.foreach { case(k, v) => if (v ne null) result.setConfString(k, v) } result } // For test only def copy(entries: (ConfigEntry[_], Any)*): SQLConf = { val cloned = clone() entries.foreach { case (entry, value) => cloned.setConfString(entry.key, value.toString) } cloned } def isModifiable(key: String): Boolean = { containsConfigKey(key) && !isStaticConfigKey(key) } }
gengliangwang/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
Scala
apache-2.0
203,305
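A minimal usage sketch (not part of SQLConf.scala or the repository above): it shows how a few of the entries defined in this file surface to users through the public runtime configuration. The config keys are the ones declared above; the SparkSession setup, app name, master and the chosen override values are illustrative assumptions only.

import org.apache.spark.sql.SparkSession

object SQLConfUsageSketch {
  def main(args: Array[String]): Unit = {
    // Hypothetical local session purely for demonstration.
    val spark = SparkSession.builder()
      .appName("sqlconf-usage-sketch")
      .master("local[*]")
      .getOrCreate()

    // Read an entry with its declared default, then override it for this session.
    println(spark.conf.get("spark.sql.debug.maxToStringFields")) // "25" unless overridden
    spark.conf.set("spark.sql.debug.maxToStringFields", "100")

    // Enum-valued entries such as spark.sql.mapKeyDedupPolicy validate their value
    // (EXCEPTION or LAST_WIN); an invalid string raises an IllegalArgumentException.
    spark.conf.set("spark.sql.mapKeyDedupPolicy", "LAST_WIN")

    // SET and RESET issued as SQL go through the same SQLConf machinery.
    spark.sql("SET spark.sql.cli.print.header=true")
    spark.sql("RESET spark.sql.cli.print.header")

    spark.stop()
  }
}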
/* * @author Philip Stutz * * Copyright 2014 University of Zurich * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.signalcollect.triplerush.sparql import org.scalatest.FlatSpec import org.scalatest.Matchers import com.signalcollect.triplerush.TestAnnouncements import com.signalcollect.triplerush.TripleRush import scala.concurrent.Await import scala.concurrent.duration._ import com.hp.hpl.jena.query.QueryFactory import com.hp.hpl.jena.rdf.model.ModelFactory import com.hp.hpl.jena.query.QueryExecutionFactory import com.hp.hpl.jena.rdf.model.ResourceFactory import collection.JavaConversions._ class OrderByPlusLimitSpec extends FlatSpec with Matchers with TestAnnouncements { "ORDER BY and LIMIT" should "return the same results as Jena for IRIs" in { val sparql = """ PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> SELECT ?product ?label WHERE { ?product rdfs:label ?label . } ORDER BY ?label LIMIT 3 """ implicit val tr = new TripleRush try { tr.addTriple("http://SomeProduct2", "http://www.w3.org/2000/01/rdf-schema#label", "http://B") tr.addTriple("http://SomeProduct3", "http://www.w3.org/2000/01/rdf-schema#label", "http://C") tr.addTriple("http://SomeProduct4", "http://www.w3.org/2000/01/rdf-schema#label", "http://D") tr.addTriple("http://SomeProduct5", "http://www.w3.org/2000/01/rdf-schema#label", "http://E") tr.addTriple("http://SomeProduct1", "http://www.w3.org/2000/01/rdf-schema#label", "http://A") tr.prepareExecution println("End prepare") val query = Sparql(sparql).get // val jena = ModelFactory.createDefaultModel // val label = ResourceFactory.createProperty("http://www.w3.org/2000/01/rdf-schema#label") // val p1 = ResourceFactory.createResource("http://SomeProduct1") // val p2 = ResourceFactory.createResource("http://SomeProduct2") // val p3 = ResourceFactory.createResource("http://SomeProduct3") // val p4 = ResourceFactory.createResource("http://SomeProduct4") // val p5 = ResourceFactory.createResource("http://SomeProduct5") // val a = ResourceFactory.createResource("http://A") // val b = ResourceFactory.createResource("http://B") // val c = ResourceFactory.createResource("http://C") // val d = ResourceFactory.createResource("http://D") // val e = ResourceFactory.createResource("http://E") // jena.add(ResourceFactory.createStatement(p1, label, a)) // jena.add(ResourceFactory.createStatement(p2, label, b)) // jena.add(ResourceFactory.createStatement(p3, label, c)) // jena.add(ResourceFactory.createStatement(p4, label, d)) // jena.add(ResourceFactory.createStatement(p5, label, e)) // val jenaQuery = QueryFactory.create(sparql) // val jenaQueryExecution = QueryExecutionFactory.create(jenaQuery, jena) // val jenaResultIterator = jenaQueryExecution.execSelect // println(jenaResultIterator.toList.map(_.get("label"))) val result = query.resultIterator.map(bindings => (bindings("label"), bindings("product"))).toList assert(result === List(("http://A", "http://SomeProduct1"), ("http://B", "http://SomeProduct2"), ("http://C", "http://SomeProduct3"))) } finally { tr.shutdown } } }
jacqueslk/triplerush-filter
src/test/scala/com/signalcollect/triplerush/sparql/OrderByPlusLimitSpec.scala
Scala
apache-2.0
3,920
package rpgboss.editor.uibase

import SwingUtils._
import scala.swing._
import rpgboss.model.event.IntParameter
import rpgboss.model.event.EventParameterValueType
import rpgboss.model.HasName
import scala.swing.event.MouseClicked
import rpgboss.model.event.EventParameter
import rpgboss.lib.Utils
import rpgboss.model.event.EventCmd
import rpgboss.model.ProjectData
import rpgboss.editor.Internationalized._

class ParameterDialog[T](
  owner: Window,
  initial: EventParameter[T],
  onOk: EventParameter[T] => Unit)
  (implicit m: reflect.Manifest[EventParameter[T]])
  extends StdDialog(owner, getMessage("Parameter")) {

  val model = Utils.deepCopy(initial)

  val fLocalVariable = textField(model.localVariable, model.localVariable = _)

  val fGlobalVariable = textField(model.globalVariable, model.globalVariable = _)

  def updateFields() = {
    fLocalVariable.enabled =
      model.valueTypeId == EventParameterValueType.LocalVariable.id
    fGlobalVariable.enabled =
      model.valueTypeId == EventParameterValueType.GlobalVariable.id
  }
  updateFields()

  val valueTypeBtns = enumIdRadios(EventParameterValueType)(
    model.valueTypeId,
    v => {
      model.valueTypeId = v
      updateFields()
    })

  val group = makeButtonGroup(valueTypeBtns)

  override def okFunc(): Unit = {
    import EventParameterValueType._

    if (model.valueTypeId == LocalVariable.id && model.localVariable.isEmpty) {
      Dialog.showMessage(
        fLocalVariable,
        getMessage("Local_Variable_Cannot_Be_Blank"),
        getMessage("Validation_Error"),
        Dialog.Message.Error)
      return
    }

    if (model.supportsGlobalVariable && model.valueTypeId == GlobalVariable.id &&
      model.globalVariable.isEmpty) {
      Dialog.showMessage(
        fGlobalVariable,
        getMessage("Global_Variable_Cannot_Be_Blank"),
        getMessage("Validation_Error"),
        Dialog.Message.Error)
      return
    }

    onOk(model)
    close()
  }

  contents = new DesignGridPanel {
    row().grid().add(valueTypeBtns(0))
    row().grid().add(Swing.HGlue)

    row().grid().add(valueTypeBtns(1))
    row().grid().add(fLocalVariable)

    if (model.supportsGlobalVariable) {
      row().grid().add(valueTypeBtns(2))
      row().grid().add(fGlobalVariable)
    }

    addButtons(okBtn, cancelBtn)
  }
}

class ParameterFullComponent[T](
  owner: Window,
  field: EventParameterField[T])
  (implicit m: reflect.Manifest[EventParameter[T]])
  extends BoxPanel(Orientation.Horizontal) {

  val model = field.model
  val component = field.getModelComponent()
  val container = new BoxPanel(Orientation.Horizontal)

  val detailsBtn = new Button(Action("...") {
    val d: ParameterDialog[T] = new ParameterDialog[T](
      owner,
      model,
      newModel => {
        model.copyValuesFrom(newModel)
        updateContainer()
      })
    d.open()
  })

  val fConstant = component
  val label = new TextField {
    editable = false
    enabled = true

    listenTo(this.mouse.clicks)
    reactions += {
      case e: MouseClicked => {
        detailsBtn.doClick()
      }
    }
  }

  def updateContainer(): Unit = {
    container.contents.clear()

    if (model.valueTypeId == EventParameterValueType.Constant.id) {
      container.contents += fConstant
    } else {
      container.contents += label
      label.text = EventParameterValueType(model.valueTypeId) match {
        case EventParameterValueType.LocalVariable =>
          getMessageColon("Local_Variable") + " %s".format(model.localVariable)
        case EventParameterValueType.GlobalVariable =>
          "%s".format(model.globalVariable)
      }
    }

    container.revalidate()
    container.repaint()
  }
  updateContainer()

  contents += container
  contents += detailsBtn
}

object ParameterFullComponent {
  def addParameterFullComponentsToPanel(
    owner: Window,
    panel: DesignGridPanel,
    parameterFields: Seq[EventParameterField[_]]) = {
    for (field <- parameterFields) {
      val fullComponent = new ParameterFullComponent(owner, field)
      panel.row().grid((new Label(field.name)).peer).add(fullComponent.peer)
    }
  }
}
toastythought/rpgboss
editor/src/main/scala/rpgboss/editor/uibase/ParameterFields.scala
Scala
agpl-3.0
4,198
package com.snppts.backend

import org.scalatra._
import com.mongodb.casbah.Imports._
import scala.util.control.Exception._

class GroupMongoController(mongoCollection: MongoCollection) extends ScalatraServlet {

  val groupId = "group_id"

  /**
   * Insert a new group into the database.
   * curl -i -H "Accept: application/json" -X POST -d "name=Ale" http://localhost:8080/api/v1/group
   */
  post("/") {
    val name = params("name")
    val maxGroupId = mongoMaxGroupId() + 1
    val group = MongoDBObject(groupId -> maxGroupId, "name" -> name)
    mongoCollection += group
  }

  /** Returns the highest group_id currently stored, or 0 if the collection is empty. */
  def mongoMaxGroupId(): Int = {
    val fields = MongoDBObject(groupId -> 1)
    // Sort descending on group_id so the first document holds the current maximum.
    val orderByDesc = -1
    val orderBy = MongoDBObject(groupId -> orderByDesc)
    val result = mongoCollection.findOne(fields = fields, orderBy = orderBy)
    val firstId = 0
    if (result.isDefined) {
      return result.get.getAs[Int](groupId).get
    }
    return firstId
  }

  /**
   * Get all the groups http://localhost:8080/api/v1/group/
   */
  get("/") {
    for { x <- mongoCollection } yield x
  }

  /**
   * Get a group from its id http://localhost:8080/api/v1/group/1
   */
  get("/:group_id") {
    val optGroupId = catching(classOf[NumberFormatException]) opt params(groupId).toInt
    val query = MongoDBObject(groupId -> optGroupId)
    for (x <- mongoCollection.findOne(query)) yield x
  }
}
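// --- Hedged usage sketch (not part of the original file): one way this servlet could be
// --- mounted under the /api/v1/group prefix that its curl examples assume, using Scalatra's
// --- standard ScalatraBootstrap/LifeCycle hook. The database and collection names
// --- ("snppts", "groups") are placeholders, not taken from the repository.
import javax.servlet.ServletContext

class ScalatraBootstrap extends LifeCycle {
  override def init(context: ServletContext): Unit = {
    // MongoClient() connects to localhost:27017 by default.
    val groups = MongoClient()("snppts")("groups")
    context.mount(new GroupMongoController(groups), "/api/v1/group/*")
  }
}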
andreacipriani/snppts-backend
snppts-backend/src/main/scala/com/snppts/backend/mongodb/GroupMongoController.scala
Scala
mit
1,413
package scala.c.engine class ArrayStagingArea extends StandardTest { "An 1d array addressing check" should "print the correct results" in { val code = """ void main() { int x[2]; printf("%d\\n", &x[1] - x); }""" checkResults(code) } "2d array pointer arithmetic" should "print the correct results" in { val code = """ void main() { int x[2][2] = {1,2,3,4}; int *ptr = x; printf("%d\\n", *ptr); ptr++; printf("%d\\n", *ptr); ptr++; printf("%d\\n", *ptr); ptr++; printf("%d\\n", *ptr); }""" checkResults(code) } } class ArrayInitTest extends StandardTest { "Sized arrays initialized with initLists" should "print the correct results" in { val code = """ void main() { int padding; // lets test an offset int x[5] = {1, 2, 3, 4, 5}; printf("%d %d %d %d %d\\n", x[0], x[1], x[2], x[3], x[4]); char y[5] = {'a', 'b', 'c', 'd', 'e'}; printf("%c %c %c %c %c\\n", y[0], y[1], y[2], y[3], y[4]); double z[5] = {5.6, 38.5, 2.945, 347.2, 378.2}; printf("%f %f %f %f %f\\n", z[0], z[1], z[2], z[3], z[4]); }""" checkResults(code) } "array indexed with a division binary expression" should "print the correct results" in { val code = """ void main() { int x[5] = {1, 2, 3, 4, 5}; int y = 4; int z = 2; printf("%d\\n", x[2]); printf("%d\\n", x[4 / 2]); printf("%d\\n", x[y / 2]); printf("%d\\n", x[4 / z]); }""" checkResults(code) } "Unsized arrays initialized with initLists" should "print the correct results" in { val code = """ void main() { int x[] = {1, 2, 3, 4, 5}; printf("%d %d %d %d %d\\n", x[0], x[1], x[2], x[3], x[4]); char y[] = {'a', 'b', 'c', 'd', 'e'}; printf("%c %c %c %c %c\\n", y[0], y[1], y[2], y[3], y[4]); double z[] = {5.6, 38.5, 2.945, 347.2, 378.2}; printf("%f %f %f %f %f\\n", z[0], z[1], z[2], z[3], z[4]); }""" checkResults(code) } } class SimpleHigherDimArrays extends StandardTest { "check for array clobbering" should "print the correct results" in { val code = """ void main() { int x[3][9]; x[1][0] = 43424; x[1][1] = 43; x[1][2] = 64565; x[0][0] = 5645; x[0][1] = 878; x[0][2] = 98797; printf("%d %d %d\\n", x[1][0], x[1][1], x[1][2]); printf("%d %d %d\\n", x[0][0], x[0][1], x[0][2]); }""" checkResults(code) } } class HigherDimArrays extends StandardTest { "A 2d array" should "print the correct results" in { val code = """ void main() { int x[2][2]; int i, j = 0; int count = 0; for (i = 0; i < 2; i++) { for (j = 0; j < 2; j++) { x[i][j] = count; count += 1; } } for (i = 0; i < 2; i++) { for (j = 0; j < 2; j++) { printf("%d\\n", x[i][j]); } } }""" checkResults(code) } "2d array addressing" should "print the correct results" in { val code = """ void main() { int a[2][3] = {1, 2, 3, 4, 5, 6}; printf("%d\\n", &(a[0][0]) - &(a[0][0])); printf("%d\\n", &(a[0][0]) - &(a[0][1])); printf("%d\\n", &(a[0][0]) - &(a[0][2])); printf("%d\\n", &(a[0][0]) - &(a[1][0])); printf("%d\\n", &(a[0][0]) - &(a[1][1])); }""" checkResults(code, true) } "Initialize a 2d array" should "print the correct results" in { val code = """ void main() { int a[2][3] = {1, 2, 3, 4, 5, 6}; printf("%d\\n", a[0][0]); printf("%d\\n", a[0][1]); printf("%d\\n", a[0][2]); printf("%d\\n", a[1][0]); printf("%d\\n", a[1][1]); printf("%d\\n", a[1][2]); printf("%d\\n", *a[0]); printf("%d\\n", *a[1]); }""" checkResults(code, true) } "Initialize a boundless 2d array" should "print the correct results" in { val code = """ void main() { int a[][3] = {1, 2, 3, 4, 5, 6}; printf("%d\\n", a[0][0]); printf("%d\\n", a[0][1]); printf("%d\\n", a[0][2]); printf("%d\\n", a[1][0]); printf("%d\\n", a[1][1]); printf("%d\\n", a[1][2]); printf("%d\\n", *a[0]); 
printf("%d\\n", *a[1]); }""" checkResults(code, true) } "access a pointer to a array" should "print the correct results" in { val code = """ void main() { int a[][3] = {1, 2, 3, 4, 5, 6}; int (*ptr)[3] = a; printf("%d %d ", (*ptr)[1], (*ptr)[2]); }""" checkResults(code, true) } "A 3d array" should "print the correct results" in { val code = """ void main() { int x[2][2][2]; int i, j, k = 0; int count = 0; for (i = 0; i < 2; i++) { for (j = 0; j < 2; j++) { for (k = 0; k < 2; k++) { x[i][j][k] = count; count += 1; } } } for (i = 0; i < 2; i++) { for (j = 0; j < 2; j++) { for (k = 0; k < 2; k++) { printf("%d\\n", x[i][j][k]); } } } }""" checkResults(code, true) } "A 3d array with different sizes" should "print the correct results" in { val code = """ void main() { int x[2][4][6]; int i, j, k = 0; int count = 0; for (i = 0; i < 2; i++) { for (j = 0; j < 4; j++) { for (k = 0; k < 6; k++) { x[i][j][k] = count; count += 1; } } } for (i = 0; i < 2; i++) { for (j = 0; j < 4; j++) { for (k = 0; k < 6; k++) { printf("%d\\n", x[i][j][k]); } } } }""" checkResults(code, true) } } class ArrayTest extends StandardTest { "Array sanity check" should "print the correct results" in { val code = """ void main() { char s[100] = "hello"; if(s == &s[0]) printf("true.\\n"); if(s == &s) printf("true.\\n"); }""" checkResults(code) } "A trivial array assignment" should "print the correct results" in { val code = """ void main() { int x[5]; x[2] = 5; printf("%d\\n", x[2]); }""" checkResults(code) } "An array indexed by different typed indexes" should "print the correct results" in { val code = """ void main() { long long y = 2; short z = 3; int x[5]; x[y] = 5; x[z] = 10; printf("%d %d\\n", x[y], x[z]); }""" checkResults(code) } "An array with dimensions from a binary expr" should "print the correct results" in { val code = """ void main() { int x[5*5]; x[23] = 5; printf("%d\\n", x[23]); }""" checkResults(code) } "assignment operators on an array element" should "print the correct results" in { val code = """ void main() { int x[5] = {1, 2, 3, 4, 5}; x[0] += 1; x[1] -= 1; x[2] *= 1; x[3] ^= 1; printf("%d %d %d %d\\n", x[0], x[1], x[2], x[3]); }""" checkResults(code) } "A trivial array binary expression" should "print the correct results" in { val code = """ void main() { int x[5]; x[2] = 5; x[3] = 3; printf("%d\\n", x[2] * x[3]); }""" checkResults(code) } "An array subscript with advanced binary expression" should "print the correct results" in { val code = """ void main() { int x[5]; int y = 2; x[1] = 3; x[3] = 12; printf("%d\\n", x[y - 2 + x[1]]); }""" checkResults(code) } "An array prefixed subscript" should "print the correct results" in { val code = """ void main() { int x[5] = {3, 68, 44, 29, 45}; int y = 0; printf("%d %d\\n", x[++y], y); }""" checkResults(code) } "An array postfixed subscript" should "print the correct results" in { val code = """ void main() { int x[5] = {3, 68, 44, 29, 45}; int y = 0; printf("%d %d\\n", x[y++], y); }""" checkResults(code) } "An array being set to a single non-zero value" should "print the correct results" in { val code = """ void main() { int x[5] = {3}; printf("%d %d %d %d\\n", x[0], x[1], x[2], x[3], x[4]); }""" checkResults(code) } }
bdwashbu/cEngine
tests/scala/c/engine/arrays.scala
Scala
apache-2.0
8,732
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql import java.nio.charset.StandardCharsets import java.sql.{Date, Timestamp} import java.util.TimeZone import scala.util.Random import org.apache.spark.{SparkConf, SparkException} import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions.Expression import org.apache.spark.sql.catalyst.expressions.codegen.CodegenFallback import org.apache.spark.sql.catalyst.plans.logical.OneRowRelation import org.apache.spark.sql.catalyst.util.DateTimeTestUtils import org.apache.spark.sql.catalyst.util.DateTimeTestUtils.UTC import org.apache.spark.sql.functions._ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types._ /** * Test suite for functions in [[org.apache.spark.sql.functions]]. */ class DataFrameFunctionsSuite extends QueryTest with SharedSparkSession { import testImplicits._ override def sparkConf: SparkConf = super.sparkConf .setAppName("test") .set("spark.sql.parquet.columnarReaderBatchSize", "4096") .set("spark.sql.sources.useV1SourceList", "avro") .set("spark.sql.extensions", "com.intel.oap.ColumnarPlugin") .set("spark.sql.execution.arrow.maxRecordsPerBatch", "4096") //.set("spark.shuffle.manager", "org.apache.spark.shuffle.sort.ColumnarShuffleManager") .set("spark.memory.offHeap.enabled", "true") .set("spark.memory.offHeap.size", "50m") .set("spark.sql.join.preferSortMergeJoin", "false") .set("spark.sql.columnar.codegen.hashAggregate", "false") .set("spark.oap.sql.columnar.wholestagecodegen", "false") .set("spark.sql.columnar.window", "false") .set("spark.unsafe.exceptionOnMemoryLeak", "false") //.set("spark.sql.columnar.tmp_dir", "/codegen/nativesql/") .set("spark.sql.columnar.sort.broadcastJoin", "true") .set("spark.oap.sql.columnar.preferColumnar", "true") test("array with column name") { val df = Seq((0, 1)).toDF("a", "b") val row = df.select(array("a", "b")).first() val expectedType = ArrayType(IntegerType, containsNull = false) assert(row.schema(0).dataType === expectedType) assert(row.getAs[Seq[Int]](0) === Seq(0, 1)) } test("array with column expression") { val df = Seq((0, 1)).toDF("a", "b") val row = df.select(array(col("a"), col("b") + col("b"))).first() val expectedType = ArrayType(IntegerType, containsNull = false) assert(row.schema(0).dataType === expectedType) assert(row.getSeq[Int](0) === Seq(0, 2)) } test("map with column expressions") { val df = Seq(1 -> "a").toDF("a", "b") val row = df.select(map($"a" + 1, $"b")).first() val expectedType = MapType(IntegerType, StringType, valueContainsNull = true) assert(row.schema(0).dataType === expectedType) assert(row.getMap[Int, String](0) === Map(2 -> "a")) } ignore("map with arrays") { val df1 = Seq((Seq(1, 2), Seq("a", 
"b"))).toDF("k", "v") val expectedType = MapType(IntegerType, StringType, valueContainsNull = true) val row = df1.select(map_from_arrays($"k", $"v")).first() assert(row.schema(0).dataType === expectedType) assert(row.getMap[Int, String](0) === Map(1 -> "a", 2 -> "b")) checkAnswer(df1.select(map_from_arrays($"k", $"v")), Seq(Row(Map(1 -> "a", 2 -> "b")))) val df2 = Seq((Seq(1, 2), Seq(null, "b"))).toDF("k", "v") checkAnswer(df2.select(map_from_arrays($"k", $"v")), Seq(Row(Map(1 -> null, 2 -> "b")))) val df3 = Seq((null, null)).toDF("k", "v") checkAnswer(df3.select(map_from_arrays($"k", $"v")), Seq(Row(null))) val df4 = Seq((1, "a")).toDF("k", "v") intercept[AnalysisException] { df4.select(map_from_arrays($"k", $"v")) } val df5 = Seq((Seq("a", null), Seq(1, 2))).toDF("k", "v") val msg1 = intercept[Exception] { df5.select(map_from_arrays($"k", $"v")).collect }.getMessage assert(msg1.contains("Cannot use null as map key")) val df6 = Seq((Seq(1, 2), Seq("a"))).toDF("k", "v") val msg2 = intercept[Exception] { df6.select(map_from_arrays($"k", $"v")).collect }.getMessage assert(msg2.contains("The key array and value array of MapData must have the same length")) } test("struct with column name") { val df = Seq((1, "str")).toDF("a", "b") val row = df.select(struct("a", "b")).first() val expectedType = StructType(Seq( StructField("a", IntegerType, nullable = false), StructField("b", StringType) )) assert(row.schema(0).dataType === expectedType) assert(row.getAs[Row](0) === Row(1, "str")) } test("struct with column expression") { val df = Seq((1, "str")).toDF("a", "b") val row = df.select(struct((col("a") * 2).as("c"), col("b"))).first() val expectedType = StructType(Seq( StructField("c", IntegerType, nullable = false), StructField("b", StringType) )) assert(row.schema(0).dataType === expectedType) assert(row.getAs[Row](0) === Row(2, "str")) } test("struct with column expression to be automatically named") { val df = Seq((1, "str")).toDF("a", "b") val result = df.select(struct((col("a") * 2), col("b"))) val expectedType = StructType(Seq( StructField("col1", IntegerType, nullable = false), StructField("b", StringType) )) assert(result.first.schema(0).dataType === expectedType) checkAnswer(result, Row(Row(2, "str"))) } test("struct with literal columns") { val df = Seq((1, "str1"), (2, "str2")).toDF("a", "b") val result = df.select(struct((col("a") * 2), lit(5.0))) val expectedType = StructType(Seq( StructField("col1", IntegerType, nullable = false), StructField("col2", DoubleType, nullable = false) )) assert(result.first.schema(0).dataType === expectedType) checkAnswer(result, Seq(Row(Row(2, 5.0)), Row(Row(4, 5.0)))) } ignore("struct with all literal columns") { val df = Seq((1, "str1"), (2, "str2")).toDF("a", "b") val result = df.select(struct(lit("v"), lit(5.0))) val expectedType = StructType(Seq( StructField("col1", StringType, nullable = false), StructField("col2", DoubleType, nullable = false) )) assert(result.first.schema(0).dataType === expectedType) checkAnswer(result, Seq(Row(Row("v", 5.0)), Row(Row("v", 5.0)))) } ignore("constant functions") { checkAnswer( sql("SELECT E()"), Row(scala.math.E) ) checkAnswer( sql("SELECT PI()"), Row(scala.math.Pi) ) } test("bitwiseNOT") { checkAnswer( testData2.select(bitwiseNOT($"a")), testData2.collect().toSeq.map(r => Row(~r.getInt(0)))) } test("bin") { val df = Seq[(Integer, Integer)]((12, null)).toDF("a", "b") checkAnswer( df.select(bin("a"), bin("b")), Row("1100", null)) checkAnswer( df.selectExpr("bin(a)", "bin(b)"), Row("1100", null)) } test("if 
function") { val df = Seq((1, 2)).toDF("a", "b") checkAnswer( df.selectExpr("if(a = 1, 'one', 'not_one')", "if(b = 1, 'one', 'not_one')"), Row("one", "not_one")) } test("misc md5 function") { val df = Seq(("ABC", Array[Byte](1, 2, 3, 4, 5, 6))).toDF("a", "b") checkAnswer( df.select(md5($"a"), md5($"b")), Row("902fbdd2b1df0c4f70b4a5d23525e932", "6ac1e56bc78f031059be7be854522c4c")) checkAnswer( df.selectExpr("md5(a)", "md5(b)"), Row("902fbdd2b1df0c4f70b4a5d23525e932", "6ac1e56bc78f031059be7be854522c4c")) } test("misc sha1 function") { val df = Seq(("ABC", "ABC".getBytes(StandardCharsets.UTF_8))).toDF("a", "b") checkAnswer( df.select(sha1($"a"), sha1($"b")), Row("3c01bdbb26f358bab27f267924aa2c9a03fcfdb8", "3c01bdbb26f358bab27f267924aa2c9a03fcfdb8")) val dfEmpty = Seq(("", "".getBytes(StandardCharsets.UTF_8))).toDF("a", "b") checkAnswer( dfEmpty.selectExpr("sha1(a)", "sha1(b)"), Row("da39a3ee5e6b4b0d3255bfef95601890afd80709", "da39a3ee5e6b4b0d3255bfef95601890afd80709")) } test("misc sha2 function") { val df = Seq(("ABC", Array[Byte](1, 2, 3, 4, 5, 6))).toDF("a", "b") checkAnswer( df.select(sha2($"a", 256), sha2($"b", 256)), Row("b5d4045c3f466fa91fe2cc6abe79232a1a57cdf104f7a26e716e0a1e2789df78", "7192385c3c0605de55bb9476ce1d90748190ecb32a8eed7f5207b30cf6a1fe89")) checkAnswer( df.selectExpr("sha2(a, 256)", "sha2(b, 256)"), Row("b5d4045c3f466fa91fe2cc6abe79232a1a57cdf104f7a26e716e0a1e2789df78", "7192385c3c0605de55bb9476ce1d90748190ecb32a8eed7f5207b30cf6a1fe89")) intercept[IllegalArgumentException] { df.select(sha2($"a", 1024)) } } test("misc crc32 function") { val df = Seq(("ABC", Array[Byte](1, 2, 3, 4, 5, 6))).toDF("a", "b") checkAnswer( df.select(crc32($"a"), crc32($"b")), Row(2743272264L, 2180413220L)) checkAnswer( df.selectExpr("crc32(a)", "crc32(b)"), Row(2743272264L, 2180413220L)) } test("string function find_in_set") { val df = Seq(("abc,b,ab,c,def", "abc,b,ab,c,def")).toDF("a", "b") checkAnswer( df.selectExpr("find_in_set('ab', a)", "find_in_set('x', b)"), Row(3, 0)) } test("conditional function: least") { checkAnswer( testData2.select(least(lit(-1), lit(0), col("a"), col("b"))).limit(1), Row(-1) ) checkAnswer( sql("SELECT least(a, 2) as l from testData2 order by l"), Seq(Row(1), Row(1), Row(2), Row(2), Row(2), Row(2)) ) } test("conditional function: greatest") { checkAnswer( testData2.select(greatest(lit(2), lit(3), col("a"), col("b"))).limit(1), Row(3) ) checkAnswer( sql("SELECT greatest(a, 2) as g from testData2 order by g"), Seq(Row(2), Row(2), Row(2), Row(2), Row(3), Row(3)) ) } ignore("pmod") { val intData = Seq((7, 3), (-7, 3)).toDF("a", "b") checkAnswer( intData.select(pmod($"a", $"b")), Seq(Row(1), Row(2)) ) checkAnswer( intData.select(pmod($"a", lit(3))), Seq(Row(1), Row(2)) ) checkAnswer( intData.select(pmod(lit(-7), $"b")), Seq(Row(2), Row(2)) ) checkAnswer( intData.selectExpr("pmod(a, b)"), Seq(Row(1), Row(2)) ) checkAnswer( intData.selectExpr("pmod(a, 3)"), Seq(Row(1), Row(2)) ) checkAnswer( intData.selectExpr("pmod(-7, b)"), Seq(Row(2), Row(2)) ) val doubleData = Seq((7.2, 4.1)).toDF("a", "b") checkAnswer( doubleData.select(pmod($"a", $"b")), Seq(Row(3.1000000000000005)) // same as hive ) checkAnswer( doubleData.select(pmod(lit(2), lit(Int.MaxValue))), Seq(Row(2)) ) } test("array_sort with lambda functions") { spark.udf.register("fAsc", (x: Int, y: Int) => { if (x < y) -1 else if (x == y) 0 else 1 }) spark.udf.register("fDesc", (x: Int, y: Int) => { if (x < y) 1 else if (x == y) 0 else -1 }) spark.udf.register("fString", (x: String, y: String) => { if (x == null && y == 
null) 0 else if (x == null) 1 else if (y == null) -1 else if (x < y) 1 else if (x == y) 0 else -1 }) spark.udf.register("fStringLength", (x: String, y: String) => { if (x == null && y == null) 0 else if (x == null) 1 else if (y == null) -1 else if (x.length < y.length) -1 else if (x.length == y.length) 0 else 1 }) val df1 = Seq(Array[Int](3, 2, 5, 1, 2)).toDF("a") checkAnswer( df1.selectExpr("array_sort(a, (x, y) -> fAsc(x, y))"), Seq( Row(Seq(1, 2, 2, 3, 5))) ) checkAnswer( df1.selectExpr("array_sort(a, (x, y) -> fDesc(x, y))"), Seq( Row(Seq(5, 3, 2, 2, 1))) ) val df2 = Seq(Array[String]("bc", "ab", "dc")).toDF("a") checkAnswer( df2.selectExpr("array_sort(a, (x, y) -> fString(x, y))"), Seq( Row(Seq("dc", "bc", "ab"))) ) val df3 = Seq(Array[String]("a", "abcd", "abc")).toDF("a") checkAnswer( df3.selectExpr("array_sort(a, (x, y) -> fStringLength(x, y))"), Seq( Row(Seq("a", "abc", "abcd"))) ) val df4 = Seq((Array[Array[Int]](Array(2, 3, 1), Array(4, 2, 1, 4), Array(1, 2)), "x")).toDF("a", "b") checkAnswer( df4.selectExpr("array_sort(a, (x, y) -> fAsc(cardinality(x), cardinality(y)))"), Seq( Row(Seq[Seq[Int]](Seq(1, 2), Seq(2, 3, 1), Seq(4, 2, 1, 4)))) ) val df5 = Seq(Array[String]("bc", null, "ab", "dc")).toDF("a") checkAnswer( df5.selectExpr("array_sort(a, (x, y) -> fString(x, y))"), Seq( Row(Seq("dc", "bc", "ab", null))) ) spark.sql("drop temporary function fAsc") spark.sql("drop temporary function fDesc") spark.sql("drop temporary function fString") spark.sql("drop temporary function fStringLength") } test("sort_array/array_sort functions") { val df = Seq( (Array[Int](2, 1, 3), Array("b", "c", "a")), (Array.empty[Int], Array.empty[String]), (null, null) ).toDF("a", "b") checkAnswer( df.select(sort_array($"a"), sort_array($"b")), Seq( Row(Seq(1, 2, 3), Seq("a", "b", "c")), Row(Seq.empty[Int], Seq.empty[String]), Row(null, null)) ) checkAnswer( df.select(sort_array($"a", false), sort_array($"b", false)), Seq( Row(Seq(3, 2, 1), Seq("c", "b", "a")), Row(Seq.empty[Int], Seq.empty[String]), Row(null, null)) ) checkAnswer( df.selectExpr("sort_array(a)", "sort_array(b)"), Seq( Row(Seq(1, 2, 3), Seq("a", "b", "c")), Row(Seq.empty[Int], Seq.empty[String]), Row(null, null)) ) checkAnswer( df.selectExpr("sort_array(a, true)", "sort_array(b, false)"), Seq( Row(Seq(1, 2, 3), Seq("c", "b", "a")), Row(Seq.empty[Int], Seq.empty[String]), Row(null, null)) ) val df2 = Seq((Array[Array[Int]](Array(2), Array(1), Array(2, 4), null), "x")).toDF("a", "b") checkAnswer( df2.selectExpr("sort_array(a, true)", "sort_array(a, false)"), Seq( Row( Seq[Seq[Int]](null, Seq(1), Seq(2), Seq(2, 4)), Seq[Seq[Int]](Seq(2, 4), Seq(2), Seq(1), null))) ) val df3 = Seq(("xxx", "x")).toDF("a", "b") assert(intercept[AnalysisException] { df3.selectExpr("sort_array(a)").collect() }.getMessage().contains("only supports array input")) checkAnswer( df.select(array_sort($"a"), array_sort($"b")), Seq( Row(Seq(1, 2, 3), Seq("a", "b", "c")), Row(Seq.empty[Int], Seq.empty[String]), Row(null, null)) ) checkAnswer( df.selectExpr("array_sort(a)", "array_sort(b)"), Seq( Row(Seq(1, 2, 3), Seq("a", "b", "c")), Row(Seq.empty[Int], Seq.empty[String]), Row(null, null)) ) checkAnswer( df2.selectExpr("array_sort(a)"), Seq(Row(Seq[Seq[Int]](Seq(1), Seq(2), Seq(2, 4), null))) ) assert(intercept[AnalysisException] { df3.selectExpr("array_sort(a)").collect() }.getMessage().contains("argument 1 requires array type, however, '`a`' is of string type")) } def testSizeOfArray(sizeOfNull: Any): Unit = { val df = Seq( (Seq[Int](1, 2), "x"), (Seq[Int](), "y"), 
(Seq[Int](1, 2, 3), "z"), (null, "empty") ).toDF("a", "b") checkAnswer(df.select(size($"a")), Seq(Row(2), Row(0), Row(3), Row(sizeOfNull))) checkAnswer(df.selectExpr("size(a)"), Seq(Row(2), Row(0), Row(3), Row(sizeOfNull))) checkAnswer(df.selectExpr("cardinality(a)"), Seq(Row(2L), Row(0L), Row(3L), Row(sizeOfNull))) } test("array size function - legacy") { withSQLConf(SQLConf.LEGACY_SIZE_OF_NULL.key -> "true") { testSizeOfArray(sizeOfNull = -1) } } test("array size function") { withSQLConf(SQLConf.LEGACY_SIZE_OF_NULL.key -> "false") { testSizeOfArray(sizeOfNull = null) } // size(null) should return null under ansi mode. withSQLConf( SQLConf.LEGACY_SIZE_OF_NULL.key -> "true", SQLConf.ANSI_ENABLED.key -> "true") { testSizeOfArray(sizeOfNull = null) } } test("dataframe arrays_zip function") { val df1 = Seq((Seq(9001, 9002, 9003), Seq(4, 5, 6))).toDF("val1", "val2") val df2 = Seq((Seq("a", "b"), Seq(true, false), Seq(10, 11))).toDF("val1", "val2", "val3") val df3 = Seq((Seq("a", "b"), Seq(4, 5, 6))).toDF("val1", "val2") val df4 = Seq((Seq("a", "b", null), Seq(4L))).toDF("val1", "val2") val df5 = Seq((Seq(-1), Seq(null), Seq(), Seq(null, null))).toDF("val1", "val2", "val3", "val4") val df6 = Seq((Seq(192.toByte, 256.toByte), Seq(1.1), Seq(), Seq(null, null))) .toDF("v1", "v2", "v3", "v4") val df7 = Seq((Seq(Seq(1, 2, 3), Seq(4, 5)), Seq(1.1, 2.2))).toDF("v1", "v2") val df8 = Seq((Seq(Array[Byte](1.toByte, 5.toByte)), Seq(null))).toDF("v1", "v2") val expectedValue1 = Row(Seq(Row(9001, 4), Row(9002, 5), Row(9003, 6))) checkAnswer(df1.select(arrays_zip($"val1", $"val2")), expectedValue1) checkAnswer(df1.selectExpr("arrays_zip(val1, val2)"), expectedValue1) val expectedValue2 = Row(Seq(Row("a", true, 10), Row("b", false, 11))) checkAnswer(df2.select(arrays_zip($"val1", $"val2", $"val3")), expectedValue2) checkAnswer(df2.selectExpr("arrays_zip(val1, val2, val3)"), expectedValue2) val expectedValue3 = Row(Seq(Row("a", 4), Row("b", 5), Row(null, 6))) checkAnswer(df3.select(arrays_zip($"val1", $"val2")), expectedValue3) checkAnswer(df3.selectExpr("arrays_zip(val1, val2)"), expectedValue3) val expectedValue4 = Row(Seq(Row("a", 4L), Row("b", null), Row(null, null))) checkAnswer(df4.select(arrays_zip($"val1", $"val2")), expectedValue4) checkAnswer(df4.selectExpr("arrays_zip(val1, val2)"), expectedValue4) val expectedValue5 = Row(Seq(Row(-1, null, null, null), Row(null, null, null, null))) checkAnswer(df5.select(arrays_zip($"val1", $"val2", $"val3", $"val4")), expectedValue5) checkAnswer(df5.selectExpr("arrays_zip(val1, val2, val3, val4)"), expectedValue5) val expectedValue6 = Row(Seq( Row(192.toByte, 1.1, null, null), Row(256.toByte, null, null, null))) checkAnswer(df6.select(arrays_zip($"v1", $"v2", $"v3", $"v4")), expectedValue6) checkAnswer(df6.selectExpr("arrays_zip(v1, v2, v3, v4)"), expectedValue6) val expectedValue7 = Row(Seq( Row(Seq(1, 2, 3), 1.1), Row(Seq(4, 5), 2.2))) checkAnswer(df7.select(arrays_zip($"v1", $"v2")), expectedValue7) checkAnswer(df7.selectExpr("arrays_zip(v1, v2)"), expectedValue7) val expectedValue8 = Row(Seq( Row(Array[Byte](1.toByte, 5.toByte), null))) checkAnswer(df8.select(arrays_zip($"v1", $"v2")), expectedValue8) checkAnswer(df8.selectExpr("arrays_zip(v1, v2)"), expectedValue8) } testWithWholeStageCodegenOnAndOff("SPARK-24633: arrays_zip splits input " + "processing correctly") { _ => val df = spark.range(1) val exprs = (0 to 5).map(x => array($"id" + lit(x))) checkAnswer(df.select(arrays_zip(exprs: _*)), Row(Seq(Row(0, 1, 2, 3, 4, 5)))) } def testSizeOfMap(sizeOfNull: 
Any): Unit = { val df = Seq( (Map[Int, Int](1 -> 1, 2 -> 2), "x"), (Map[Int, Int](), "y"), (Map[Int, Int](1 -> 1, 2 -> 2, 3 -> 3), "z"), (null, "empty") ).toDF("a", "b") checkAnswer(df.select(size($"a")), Seq(Row(2), Row(0), Row(3), Row(sizeOfNull))) checkAnswer(df.selectExpr("size(a)"), Seq(Row(2), Row(0), Row(3), Row(sizeOfNull))) } test("map size function - legacy") { withSQLConf(SQLConf.LEGACY_SIZE_OF_NULL.key -> "true") { testSizeOfMap(sizeOfNull = -1: Int) } } test("map size function") { withSQLConf(SQLConf.LEGACY_SIZE_OF_NULL.key -> "false") { testSizeOfMap(sizeOfNull = null) } // size(null) should return null under ansi mode. withSQLConf( SQLConf.LEGACY_SIZE_OF_NULL.key -> "true", SQLConf.ANSI_ENABLED.key -> "true") { testSizeOfMap(sizeOfNull = null) } } test("map_keys/map_values function") { val df = Seq( (Map[Int, Int](1 -> 100, 2 -> 200), "x"), (Map[Int, Int](), "y"), (Map[Int, Int](1 -> 100, 2 -> 200, 3 -> 300), "z") ).toDF("a", "b") checkAnswer( df.selectExpr("map_keys(a)"), Seq(Row(Seq(1, 2)), Row(Seq.empty), Row(Seq(1, 2, 3))) ) checkAnswer( df.selectExpr("map_values(a)"), Seq(Row(Seq(100, 200)), Row(Seq.empty), Row(Seq(100, 200, 300))) ) } ignore("map_entries") { // Primitive-type elements val idf = Seq( Map[Int, Int](1 -> 100, 2 -> 200, 3 -> 300), Map[Int, Int](), null ).toDF("m") val iExpected = Seq( Row(Seq(Row(1, 100), Row(2, 200), Row(3, 300))), Row(Seq.empty), Row(null) ) def testPrimitiveType(): Unit = { checkAnswer(idf.select(map_entries($"m")), iExpected) checkAnswer(idf.selectExpr("map_entries(m)"), iExpected) checkAnswer(idf.selectExpr("map_entries(map(1, null, 2, null))"), Seq.fill(iExpected.length)(Row(Seq(Row(1, null), Row(2, null))))) } // Test with local relation, the Project will be evaluated without codegen testPrimitiveType() // Test with cached relation, the Project will be evaluated with codegen idf.cache() testPrimitiveType() // Non-primitive-type elements val sdf = Seq( Map[String, String]("a" -> "f", "b" -> "o", "c" -> "o"), Map[String, String]("a" -> null, "b" -> null), Map[String, String](), null ).toDF("m") val sExpected = Seq( Row(Seq(Row("a", "f"), Row("b", "o"), Row("c", "o"))), Row(Seq(Row("a", null), Row("b", null))), Row(Seq.empty), Row(null) ) def testNonPrimitiveType(): Unit = { checkAnswer(sdf.select(map_entries($"m")), sExpected) checkAnswer(sdf.selectExpr("map_entries(m)"), sExpected) } // Test with local relation, the Project will be evaluated without codegen testNonPrimitiveType() // Test with cached relation, the Project will be evaluated with codegen sdf.cache() testNonPrimitiveType() } test("map_concat function") { val df1 = Seq( (Map[Int, Int](1 -> 100, 2 -> 200), Map[Int, Int](3 -> 300, 4 -> 400)), (Map[Int, Int](1 -> 100, 2 -> 200), Map[Int, Int](3 -> 300, 1 -> 400)), (null, Map[Int, Int](3 -> 300, 4 -> 400)) ).toDF("map1", "map2") val expected1a = Seq( Row(Map(1 -> 100, 2 -> 200, 3 -> 300, 4 -> 400)), Row(Map(1 -> 400, 2 -> 200, 3 -> 300)), Row(null) ) intercept[SparkException](df1.selectExpr("map_concat(map1, map2)").collect()) intercept[SparkException](df1.select(map_concat($"map1", $"map2")).collect()) withSQLConf(SQLConf.MAP_KEY_DEDUP_POLICY.key -> SQLConf.MapKeyDedupPolicy.LAST_WIN.toString) { checkAnswer(df1.selectExpr("map_concat(map1, map2)"), expected1a) checkAnswer(df1.select(map_concat($"map1", $"map2")), expected1a) } val expected1b = Seq( Row(Map(1 -> 100, 2 -> 200)), Row(Map(1 -> 100, 2 -> 200)), Row(null) ) checkAnswer(df1.selectExpr("map_concat(map1)"), expected1b) checkAnswer(df1.select(map_concat($"map1")), 
expected1b) val df2 = Seq( ( Map[Array[Int], Int](Array(1) -> 100, Array(2) -> 200), Map[String, Int]("3" -> 300, "4" -> 400) ) ).toDF("map1", "map2") val expected2 = Seq(Row(Map())) checkAnswer(df2.selectExpr("map_concat()"), expected2) checkAnswer(df2.select(map_concat()), expected2) val df3 = { val schema = StructType( StructField("map1", MapType(StringType, IntegerType, true), false) :: StructField("map2", MapType(StringType, IntegerType, false), false) :: Nil ) val data = Seq( Row(Map[String, Any]("a" -> 1, "b" -> null), Map[String, Any]("c" -> 3, "d" -> 4)), Row(Map[String, Any]("a" -> 1, "b" -> 2), Map[String, Any]("c" -> 3, "d" -> 4)) ) spark.createDataFrame(spark.sparkContext.parallelize(data), schema) } val expected3 = Seq( Row(Map[String, Any]("a" -> 1, "b" -> null, "c" -> 3, "d" -> 4)), Row(Map[String, Any]("a" -> 1, "b" -> 2, "c" -> 3, "d" -> 4)) ) checkAnswer(df3.selectExpr("map_concat(map1, map2)"), expected3) checkAnswer(df3.select(map_concat($"map1", $"map2")), expected3) val expectedMessage1 = "input to function map_concat should all be the same type" assert(intercept[AnalysisException] { df2.selectExpr("map_concat(map1, map2)").collect() }.getMessage().contains(expectedMessage1)) assert(intercept[AnalysisException] { df2.select(map_concat($"map1", $"map2")).collect() }.getMessage().contains(expectedMessage1)) val expectedMessage2 = "input to function map_concat should all be of type map" assert(intercept[AnalysisException] { df2.selectExpr("map_concat(map1, 12)").collect() }.getMessage().contains(expectedMessage2)) assert(intercept[AnalysisException] { df2.select(map_concat($"map1", lit(12))).collect() }.getMessage().contains(expectedMessage2)) } ignore("map_from_entries function") { // Test cases with primitive-type keys and values val idf = Seq( Seq((1, 10), (2, 20), (3, 10)), Seq((1, 10), null, (2, 20)), Seq.empty, null ).toDF("a") val iExpected = Seq( Row(Map(1 -> 10, 2 -> 20, 3 -> 10)), Row(null), Row(Map.empty), Row(null)) def testPrimitiveType(): Unit = { checkAnswer(idf.select(map_from_entries($"a")), iExpected) checkAnswer(idf.selectExpr("map_from_entries(a)"), iExpected) checkAnswer(idf.selectExpr("map_from_entries(array(struct(1, null), struct(2, null)))"), Seq.fill(iExpected.length)(Row(Map(1 -> null, 2 -> null)))) } // Test with local relation, the Project will be evaluated without codegen testPrimitiveType() // Test with cached relation, the Project will be evaluated with codegen idf.cache() testPrimitiveType() // Test cases with non-primitive-type keys and values val sdf = Seq( Seq(("a", "aa"), ("b", "bb"), ("c", "aa")), Seq(("a", "aa"), null, ("b", "bb")), Seq(("a", null), ("b", null)), Seq.empty, null ).toDF("a") val sExpected = Seq( Row(Map("a" -> "aa", "b" -> "bb", "c" -> "aa")), Row(null), Row(Map("a" -> null, "b" -> null)), Row(Map.empty), Row(null)) def testNonPrimitiveType(): Unit = { checkAnswer(sdf.select(map_from_entries($"a")), sExpected) checkAnswer(sdf.selectExpr("map_from_entries(a)"), sExpected) } // Test with local relation, the Project will be evaluated without codegen testNonPrimitiveType() // Test with cached relation, the Project will be evaluated with codegen sdf.cache() testNonPrimitiveType() } ignore("array contains function") { val df = Seq( (Seq[Int](1, 2), "x", 1), (Seq[Int](), "x", 1) ).toDF("a", "b", "c") // Simple test cases checkAnswer( df.select(array_contains(df("a"), 1)), Seq(Row(true), Row(false)) ) checkAnswer( df.selectExpr("array_contains(a, 1)"), Seq(Row(true), Row(false)) ) checkAnswer( 
df.select(array_contains(df("a"), df("c"))), Seq(Row(true), Row(false)) ) checkAnswer( df.selectExpr("array_contains(a, c)"), Seq(Row(true), Row(false)) ) // In hive, this errors because null has no type information intercept[AnalysisException] { df.select(array_contains(df("a"), null)) } intercept[AnalysisException] { df.selectExpr("array_contains(a, null)") } intercept[AnalysisException] { df.selectExpr("array_contains(null, 1)") } checkAnswer( df.selectExpr("array_contains(array(array(1), null)[0], 1)"), Seq(Row(true), Row(true)) ) checkAnswer( df.selectExpr("array_contains(array(1, null), array(1, null)[0])"), Seq(Row(true), Row(true)) ) checkAnswer( OneRowRelation().selectExpr("array_contains(array(1), 1.23D)"), Seq(Row(false)) ) checkAnswer( OneRowRelation().selectExpr("array_contains(array(1), 1.0D)"), Seq(Row(true)) ) checkAnswer( OneRowRelation().selectExpr("array_contains(array(1.0D), 1)"), Seq(Row(true)) ) checkAnswer( OneRowRelation().selectExpr("array_contains(array(1.23D), 1)"), Seq(Row(false)) ) checkAnswer( OneRowRelation().selectExpr("array_contains(array(array(1)), array(1.0D))"), Seq(Row(true)) ) checkAnswer( OneRowRelation().selectExpr("array_contains(array(array(1)), array(1.23D))"), Seq(Row(false)) ) val e1 = intercept[AnalysisException] { OneRowRelation().selectExpr("array_contains(array(1), .01234567890123456790123456780)") } val errorMsg1 = s""" |Input to function array_contains should have been array followed by a |value with same element type, but it's [array<int>, decimal(38,29)]. """.stripMargin.replace("\\n", " ").trim() assert(e1.message.contains(errorMsg1)) val e2 = intercept[AnalysisException] { OneRowRelation().selectExpr("array_contains(array(1), 'foo')") } val errorMsg2 = s""" |Input to function array_contains should have been array followed by a |value with same element type, but it's [array<int>, string]. 
""".stripMargin.replace("\\n", " ").trim() assert(e2.message.contains(errorMsg2)) } ignore("SPARK-29600: ArrayContains function may return incorrect result for DecimalType") { checkAnswer( sql("select array_contains(array(1.10), 1.1)"), Seq(Row(true)) ) checkAnswer( sql("SELECT array_contains(array(1.1), 1.10)"), Seq(Row(true)) ) checkAnswer( sql("SELECT array_contains(array(1.11), 1.1)"), Seq(Row(false)) ) } test("arrays_overlap function") { val df = Seq( (Seq[Option[Int]](Some(1), Some(2)), Seq[Option[Int]](Some(-1), Some(10))), (Seq[Option[Int]](Some(1), Some(2)), Seq[Option[Int]](Some(-1), None)), (Seq[Option[Int]](Some(3), Some(2)), Seq[Option[Int]](Some(1), Some(2))) ).toDF("a", "b") val answer = Seq(Row(false), Row(null), Row(true)) checkAnswer(df.select(arrays_overlap(df("a"), df("b"))), answer) checkAnswer(df.selectExpr("arrays_overlap(a, b)"), answer) checkAnswer( Seq((Seq(1, 2, 3), Seq(2.0, 2.5))).toDF("a", "b").selectExpr("arrays_overlap(a, b)"), Row(true)) intercept[AnalysisException] { sql("select arrays_overlap(array(1, 2, 3), array('a', 'b', 'c'))") } intercept[AnalysisException] { sql("select arrays_overlap(null, null)") } intercept[AnalysisException] { sql("select arrays_overlap(map(1, 2), map(3, 4))") } } test("slice function") { val df = Seq( Seq(1, 2, 3), Seq(4, 5) ).toDF("x") val answer = Seq(Row(Seq(2, 3)), Row(Seq(5))) checkAnswer(df.select(slice(df("x"), 2, 2)), answer) checkAnswer(df.selectExpr("slice(x, 2, 2)"), answer) val answerNegative = Seq(Row(Seq(3)), Row(Seq(5))) checkAnswer(df.select(slice(df("x"), -1, 1)), answerNegative) checkAnswer(df.selectExpr("slice(x, -1, 1)"), answerNegative) } test("array_join function") { val df = Seq( (Seq[String]("a", "b"), ","), (Seq[String]("a", null, "b"), ","), (Seq.empty[String], ",") ).toDF("x", "delimiter") checkAnswer( df.select(array_join(df("x"), ";")), Seq(Row("a;b"), Row("a;b"), Row("")) ) checkAnswer( df.select(array_join(df("x"), ";", "NULL")), Seq(Row("a;b"), Row("a;NULL;b"), Row("")) ) checkAnswer( df.selectExpr("array_join(x, delimiter)"), Seq(Row("a,b"), Row("a,b"), Row(""))) checkAnswer( df.selectExpr("array_join(x, delimiter, 'NULL')"), Seq(Row("a,b"), Row("a,NULL,b"), Row(""))) val idf = Seq(Seq(1, 2, 3)).toDF("x") checkAnswer( idf.select(array_join(idf("x"), ", ")), Seq(Row("1, 2, 3")) ) checkAnswer( idf.selectExpr("array_join(x, ', ')"), Seq(Row("1, 2, 3")) ) intercept[AnalysisException] { idf.selectExpr("array_join(x, 1)") } intercept[AnalysisException] { idf.selectExpr("array_join(x, ', ', 1)") } } test("array_min function") { val df = Seq( Seq[Option[Int]](Some(1), Some(3), Some(2)), Seq.empty[Option[Int]], Seq[Option[Int]](None), Seq[Option[Int]](None, Some(1), Some(-100)) ).toDF("a") val answer = Seq(Row(1), Row(null), Row(null), Row(-100)) checkAnswer(df.select(array_min(df("a"))), answer) checkAnswer(df.selectExpr("array_min(a)"), answer) } test("array_max function") { val df = Seq( Seq[Option[Int]](Some(1), Some(3), Some(2)), Seq.empty[Option[Int]], Seq[Option[Int]](None), Seq[Option[Int]](None, Some(1), Some(-100)) ).toDF("a") val answer = Seq(Row(3), Row(null), Row(null), Row(1)) checkAnswer(df.select(array_max(df("a"))), answer) checkAnswer(df.selectExpr("array_max(a)"), answer) } ignore("sequence") { checkAnswer(Seq((-2, 2)).toDF().select(sequence($"_1", $"_2")), Seq(Row(Array(-2, -1, 0, 1, 2)))) checkAnswer(Seq((7, 2, -2)).toDF().select(sequence($"_1", $"_2", $"_3")), Seq(Row(Array(7, 5, 3)))) checkAnswer( spark.sql("select sequence(" + " cast('2018-01-01 00:00:00' as timestamp)" + ", 
cast('2018-01-02 00:00:00' as timestamp)" + ", interval 12 hours)"), Seq(Row(Array( Timestamp.valueOf("2018-01-01 00:00:00"), Timestamp.valueOf("2018-01-01 12:00:00"), Timestamp.valueOf("2018-01-02 00:00:00"))))) DateTimeTestUtils.withDefaultTimeZone(UTC) { checkAnswer( spark.sql("select sequence(" + " cast('2018-01-01' as date)" + ", cast('2018-03-01' as date)" + ", interval 1 month)"), Seq(Row(Array( Date.valueOf("2018-01-01"), Date.valueOf("2018-02-01"), Date.valueOf("2018-03-01"))))) } // test type coercion checkAnswer( Seq((1.toByte, 3L, 1)).toDF().select(sequence($"_1", $"_2", $"_3")), Seq(Row(Array(1L, 2L, 3L)))) checkAnswer( spark.sql("select sequence(" + " cast('2018-01-01' as date)" + ", cast('2018-01-02 00:00:00' as timestamp)" + ", interval 12 hours)"), Seq(Row(Array( Timestamp.valueOf("2018-01-01 00:00:00"), Timestamp.valueOf("2018-01-01 12:00:00"), Timestamp.valueOf("2018-01-02 00:00:00"))))) // test invalid data types intercept[AnalysisException] { Seq((true, false)).toDF().selectExpr("sequence(_1, _2)") } intercept[AnalysisException] { Seq((true, false, 42)).toDF().selectExpr("sequence(_1, _2, _3)") } intercept[AnalysisException] { Seq((1, 2, 0.5)).toDF().selectExpr("sequence(_1, _2, _3)") } } ignore("reverse function - string") { val oneRowDF = Seq(("Spark", 3215)).toDF("s", "i") def testString(): Unit = { checkAnswer(oneRowDF.select(reverse($"s")), Seq(Row("krapS"))) checkAnswer(oneRowDF.selectExpr("reverse(s)"), Seq(Row("krapS"))) checkAnswer(oneRowDF.select(reverse($"i")), Seq(Row("5123"))) checkAnswer(oneRowDF.selectExpr("reverse(i)"), Seq(Row("5123"))) checkAnswer(oneRowDF.selectExpr("reverse(null)"), Seq(Row(null))) } // Test with local relation, the Project will be evaluated without codegen testString() // Test with cached relation, the Project will be evaluated with codegen oneRowDF.cache() testString() } ignore("reverse function - array for primitive type not containing null") { val idfNotContainsNull = Seq( Seq(1, 9, 8, 7), Seq(5, 8, 9, 7, 2), Seq.empty, null ).toDF("i") def testArrayOfPrimitiveTypeNotContainsNull(): Unit = { checkAnswer( idfNotContainsNull.select(reverse($"i")), Seq(Row(Seq(7, 8, 9, 1)), Row(Seq(2, 7, 9, 8, 5)), Row(Seq.empty), Row(null)) ) checkAnswer( idfNotContainsNull.selectExpr("reverse(i)"), Seq(Row(Seq(7, 8, 9, 1)), Row(Seq(2, 7, 9, 8, 5)), Row(Seq.empty), Row(null)) ) } // Test with local relation, the Project will be evaluated without codegen testArrayOfPrimitiveTypeNotContainsNull() // Test with cached relation, the Project will be evaluated with codegen idfNotContainsNull.cache() testArrayOfPrimitiveTypeNotContainsNull() } ignore("reverse function - array for primitive type containing null") { val idfContainsNull = Seq[Seq[Integer]]( Seq(1, 9, 8, null, 7), Seq(null, 5, 8, 9, 7, 2), Seq.empty, null ).toDF("i") def testArrayOfPrimitiveTypeContainsNull(): Unit = { checkAnswer( idfContainsNull.select(reverse($"i")), Seq(Row(Seq(7, null, 8, 9, 1)), Row(Seq(2, 7, 9, 8, 5, null)), Row(Seq.empty), Row(null)) ) checkAnswer( idfContainsNull.selectExpr("reverse(i)"), Seq(Row(Seq(7, null, 8, 9, 1)), Row(Seq(2, 7, 9, 8, 5, null)), Row(Seq.empty), Row(null)) ) } // Test with local relation, the Project will be evaluated without codegen testArrayOfPrimitiveTypeContainsNull() // Test with cached relation, the Project will be evaluated with codegen idfContainsNull.cache() testArrayOfPrimitiveTypeContainsNull() } ignore("reverse function - array for non-primitive type") { val sdf = Seq( Seq("c", "a", "b"), Seq("b", null, "c", null), Seq.empty, null 
).toDF("s") def testArrayOfNonPrimitiveType(): Unit = { checkAnswer( sdf.select(reverse($"s")), Seq(Row(Seq("b", "a", "c")), Row(Seq(null, "c", null, "b")), Row(Seq.empty), Row(null)) ) checkAnswer( sdf.selectExpr("reverse(s)"), Seq(Row(Seq("b", "a", "c")), Row(Seq(null, "c", null, "b")), Row(Seq.empty), Row(null)) ) checkAnswer( sdf.selectExpr("reverse(array(array(1, 2), array(3, 4)))"), Seq.fill(sdf.count().toInt)(Row(Seq(Seq(3, 4), Seq(1, 2)))) ) } // Test with local relation, the Project will be evaluated without codegen testArrayOfNonPrimitiveType() // Test with cached relation, the Project will be evaluated with codegen sdf.cache() testArrayOfNonPrimitiveType() } test("reverse function - data type mismatch") { val ex1 = intercept[AnalysisException] { sql("select reverse(struct(1, 'a'))") } assert(ex1.getMessage.contains("data type mismatch")) val ex2 = intercept[AnalysisException] { sql("select reverse(map(1, 'a'))") } assert(ex2.getMessage.contains("data type mismatch")) } ignore("array position function") { val df = Seq( (Seq[Int](1, 2), "x", 1), (Seq[Int](), "x", 1) ).toDF("a", "b", "c") checkAnswer( df.select(array_position(df("a"), 1)), Seq(Row(1L), Row(0L)) ) checkAnswer( df.selectExpr("array_position(a, 1)"), Seq(Row(1L), Row(0L)) ) checkAnswer( df.selectExpr("array_position(a, c)"), Seq(Row(1L), Row(0L)) ) checkAnswer( df.select(array_position(df("a"), df("c"))), Seq(Row(1L), Row(0L)) ) checkAnswer( df.select(array_position(df("a"), null)), Seq(Row(null), Row(null)) ) checkAnswer( df.selectExpr("array_position(a, null)"), Seq(Row(null), Row(null)) ) checkAnswer( OneRowRelation().selectExpr("array_position(array(1), 1.23D)"), Seq(Row(0L)) ) checkAnswer( OneRowRelation().selectExpr("array_position(array(1), 1.0D)"), Seq(Row(1L)) ) checkAnswer( OneRowRelation().selectExpr("array_position(array(1.D), 1)"), Seq(Row(1L)) ) checkAnswer( OneRowRelation().selectExpr("array_position(array(1.23D), 1)"), Seq(Row(0L)) ) checkAnswer( OneRowRelation().selectExpr("array_position(array(array(1)), array(1.0D))"), Seq(Row(1L)) ) checkAnswer( OneRowRelation().selectExpr("array_position(array(array(1)), array(1.23D))"), Seq(Row(0L)) ) checkAnswer( OneRowRelation().selectExpr("array_position(array(array(1), null)[0], 1)"), Seq(Row(1L)) ) checkAnswer( OneRowRelation().selectExpr("array_position(array(1, null), array(1, null)[0])"), Seq(Row(1L)) ) val e1 = intercept[AnalysisException] { Seq(("a string element", "a")).toDF().selectExpr("array_position(_1, _2)") } val errorMsg1 = s""" |Input to function array_position should have been array followed by a |value with same element type, but it's [string, string]. """.stripMargin.replace("\\n", " ").trim() assert(e1.message.contains(errorMsg1)) val e2 = intercept[AnalysisException] { OneRowRelation().selectExpr("array_position(array(1), '1')") } val errorMsg2 = s""" |Input to function array_position should have been array followed by a |value with same element type, but it's [array<int>, string]. 
""".stripMargin.replace("\\n", " ").trim() assert(e2.message.contains(errorMsg2)) } ignore("element_at function") { val df = Seq( (Seq[String]("1", "2", "3"), 1), (Seq[String](null, ""), -1), (Seq[String](), 2) ).toDF("a", "b") intercept[Exception] { checkAnswer( df.select(element_at(df("a"), 0)), Seq(Row(null), Row(null), Row(null)) ) }.getMessage.contains("SQL array indices start at 1") intercept[Exception] { checkAnswer( df.select(element_at(df("a"), 1.1)), Seq(Row(null), Row(null), Row(null)) ) } checkAnswer( df.select(element_at(df("a"), 4)), Seq(Row(null), Row(null), Row(null)) ) checkAnswer( df.select(element_at(df("a"), df("b"))), Seq(Row("1"), Row(""), Row(null)) ) checkAnswer( df.selectExpr("element_at(a, b)"), Seq(Row("1"), Row(""), Row(null)) ) checkAnswer( df.select(element_at(df("a"), 1)), Seq(Row("1"), Row(null), Row(null)) ) checkAnswer( df.select(element_at(df("a"), -1)), Seq(Row("3"), Row(""), Row(null)) ) checkAnswer( df.selectExpr("element_at(a, 4)"), Seq(Row(null), Row(null), Row(null)) ) checkAnswer( df.selectExpr("element_at(a, 1)"), Seq(Row("1"), Row(null), Row(null)) ) checkAnswer( df.selectExpr("element_at(a, -1)"), Seq(Row("3"), Row(""), Row(null)) ) val e1 = intercept[AnalysisException] { Seq(("a string element", 1)).toDF().selectExpr("element_at(_1, _2)") } val errorMsg1 = s""" |The first argument to function element_at should have been array or map type, but |its string type. """.stripMargin.replace("\\n", " ").trim() assert(e1.message.contains(errorMsg1)) checkAnswer( OneRowRelation().selectExpr("element_at(array(2, 1), 2S)"), Seq(Row(1)) ) checkAnswer( OneRowRelation().selectExpr("element_at(array('a', 'b'), 1Y)"), Seq(Row("a")) ) checkAnswer( OneRowRelation().selectExpr("element_at(array(1, 2, 3), 3)"), Seq(Row(3)) ) val e2 = intercept[AnalysisException] { OneRowRelation().selectExpr("element_at(array('a', 'b'), 1L)") } val errorMsg2 = s""" |Input to function element_at should have been array followed by a int, but it's |[array<string>, bigint]. """.stripMargin.replace("\\n", " ").trim() assert(e2.message.contains(errorMsg2)) checkAnswer( OneRowRelation().selectExpr("element_at(map(1, 'a', 2, 'b'), 2Y)"), Seq(Row("b")) ) checkAnswer( OneRowRelation().selectExpr("element_at(map(1, 'a', 2, 'b'), 1S)"), Seq(Row("a")) ) checkAnswer( OneRowRelation().selectExpr("element_at(map(1, 'a', 2, 'b'), 2)"), Seq(Row("b")) ) checkAnswer( OneRowRelation().selectExpr("element_at(map(1, 'a', 2, 'b'), 2L)"), Seq(Row("b")) ) checkAnswer( OneRowRelation().selectExpr("element_at(map(1, 'a', 2, 'b'), 1.0D)"), Seq(Row("a")) ) checkAnswer( OneRowRelation().selectExpr("element_at(map(1, 'a', 2, 'b'), 1.23D)"), Seq(Row(null)) ) val e3 = intercept[AnalysisException] { OneRowRelation().selectExpr("element_at(map(1, 'a', 2, 'b'), '1')") } val errorMsg3 = s""" |Input to function element_at should have been map followed by a value of same |key type, but it's [map<int,string>, string]. 
""".stripMargin.replace("\\n", " ").trim() assert(e3.message.contains(errorMsg3)) } test("array_union functions") { val df1 = Seq((Array(1, 2, 3), Array(4, 2))).toDF("a", "b") val ans1 = Row(Seq(1, 2, 3, 4)) checkAnswer(df1.select(array_union($"a", $"b")), ans1) checkAnswer(df1.selectExpr("array_union(a, b)"), ans1) val df2 = Seq((Array[Integer](1, 2, null, 4, 5), Array(-5, 4, -3, 2, -1))).toDF("a", "b") val ans2 = Row(Seq(1, 2, null, 4, 5, -5, -3, -1)) checkAnswer(df2.select(array_union($"a", $"b")), ans2) checkAnswer(df2.selectExpr("array_union(a, b)"), ans2) val df3 = Seq((Array(1L, 2L, 3L), Array(4L, 2L))).toDF("a", "b") val ans3 = Row(Seq(1L, 2L, 3L, 4L)) checkAnswer(df3.select(array_union($"a", $"b")), ans3) checkAnswer(df3.selectExpr("array_union(a, b)"), ans3) val df4 = Seq((Array[java.lang.Long](1L, 2L, null, 4L, 5L), Array(-5L, 4L, -3L, 2L, -1L))) .toDF("a", "b") val ans4 = Row(Seq(1L, 2L, null, 4L, 5L, -5L, -3L, -1L)) checkAnswer(df4.select(array_union($"a", $"b")), ans4) checkAnswer(df4.selectExpr("array_union(a, b)"), ans4) val df5 = Seq((Array("b", "a", "c"), Array("b", null, "a", "g"))).toDF("a", "b") val ans5 = Row(Seq("b", "a", "c", null, "g")) checkAnswer(df5.select(array_union($"a", $"b")), ans5) checkAnswer(df5.selectExpr("array_union(a, b)"), ans5) val df6 = Seq((null, Array("a"))).toDF("a", "b") assert(intercept[AnalysisException] { df6.select(array_union($"a", $"b")) }.getMessage.contains("data type mismatch")) assert(intercept[AnalysisException] { df6.selectExpr("array_union(a, b)") }.getMessage.contains("data type mismatch")) val df7 = Seq((null, null)).toDF("a", "b") assert(intercept[AnalysisException] { df7.select(array_union($"a", $"b")) }.getMessage.contains("data type mismatch")) assert(intercept[AnalysisException] { df7.selectExpr("array_union(a, b)") }.getMessage.contains("data type mismatch")) val df8 = Seq((Array(Array(1)), Array("a"))).toDF("a", "b") assert(intercept[AnalysisException] { df8.select(array_union($"a", $"b")) }.getMessage.contains("data type mismatch")) assert(intercept[AnalysisException] { df8.selectExpr("array_union(a, b)") }.getMessage.contains("data type mismatch")) } ignore("concat function - arrays") { val nseqi : Seq[Int] = null val nseqs : Seq[String] = null val df = Seq( (Seq(1), Seq(2, 3), Seq(5L, 6L), nseqi, Seq("a", "b", "c"), Seq("d", "e"), Seq("f"), nseqs), (Seq(1, 0), Seq.empty[Int], Seq(2L), nseqi, Seq("a"), Seq.empty[String], Seq(null), nseqs) ).toDF("i1", "i2", "i3", "in", "s1", "s2", "s3", "sn") // Simple test cases def simpleTest(): Unit = { checkAnswer ( df.select(concat($"i1", $"s1")), Seq(Row(Seq("1", "a", "b", "c")), Row(Seq("1", "0", "a"))) ) checkAnswer( df.select(concat($"i1", $"i2", $"i3")), Seq(Row(Seq(1, 2, 3, 5, 6)), Row(Seq(1, 0, 2))) ) checkAnswer( df.selectExpr("concat(array(1, null), i2, i3)"), Seq(Row(Seq(1, null, 2, 3, 5, 6)), Row(Seq(1, null, 2))) ) checkAnswer( df.select(concat($"s1", $"s2", $"s3")), Seq(Row(Seq("a", "b", "c", "d", "e", "f")), Row(Seq("a", null))) ) checkAnswer( df.selectExpr("concat(s1, s2, s3)"), Seq(Row(Seq("a", "b", "c", "d", "e", "f")), Row(Seq("a", null))) ) } // Test with local relation, the Project will be evaluated without codegen simpleTest() // Test with cached relation, the Project will be evaluated with codegen df.cache() simpleTest() // Null test cases def nullTest(): Unit = { checkAnswer( df.select(concat($"i1", $"in")), Seq(Row(null), Row(null)) ) checkAnswer( df.select(concat($"in", $"i1")), Seq(Row(null), Row(null)) ) checkAnswer( df.select(concat($"s1", $"sn")), 
Seq(Row(null), Row(null)) ) checkAnswer( df.select(concat($"sn", $"s1")), Seq(Row(null), Row(null)) ) } // Test with local relation, the Project will be evaluated without codegen df.unpersist(blocking = true) nullTest() // Test with cached relation, the Project will be evaluated with codegen df.cache() nullTest() // Type error test cases intercept[AnalysisException] { df.selectExpr("concat(i1, i2, null)") } intercept[AnalysisException] { df.selectExpr("concat(i1, array(i1, i2))") } val e = intercept[AnalysisException] { df.selectExpr("concat(map(1, 2), map(3, 4))") } assert(e.getMessage.contains("string, binary or array")) } ignore("SPARK-31227: Non-nullable null type should not coerce to nullable type in concat") { val actual = spark.range(1).selectExpr("concat(array(), array(1)) as arr") val expected = spark.range(1).selectExpr("array(1) as arr") checkAnswer(actual, expected) assert(actual.schema === expected.schema) } ignore("flatten function") { // Test cases with a primitive type val intDF = Seq( (Seq(Seq(1, 2, 3), Seq(4, 5), Seq(6))), (Seq(Seq(1, 2))), (Seq(Seq(1), Seq.empty)), (Seq(Seq.empty, Seq(1))), (Seq(Seq.empty, Seq.empty)), (Seq(Seq(1), null)), (Seq(null, Seq(1))), (Seq(null, null)) ).toDF("i") val intDFResult = Seq( Row(Seq(1, 2, 3, 4, 5, 6)), Row(Seq(1, 2)), Row(Seq(1)), Row(Seq(1)), Row(Seq.empty), Row(null), Row(null), Row(null)) def testInt(): Unit = { checkAnswer(intDF.select(flatten($"i")), intDFResult) checkAnswer(intDF.selectExpr("flatten(i)"), intDFResult) } // Test with local relation, the Project will be evaluated without codegen testInt() // Test with cached relation, the Project will be evaluated with codegen intDF.cache() testInt() // Test cases with non-primitive types val strDF = Seq( (Seq(Seq("a", "b"), Seq("c"), Seq("d", "e", "f"))), (Seq(Seq("a", "b"))), (Seq(Seq("a", null), Seq(null, "b"), Seq(null, null))), (Seq(Seq("a"), Seq.empty)), (Seq(Seq.empty, Seq("a"))), (Seq(Seq.empty, Seq.empty)), (Seq(Seq("a"), null)), (Seq(null, Seq("a"))), (Seq(null, null)) ).toDF("s") val strDFResult = Seq( Row(Seq("a", "b", "c", "d", "e", "f")), Row(Seq("a", "b")), Row(Seq("a", null, null, "b", null, null)), Row(Seq("a")), Row(Seq("a")), Row(Seq.empty), Row(null), Row(null), Row(null)) def testString(): Unit = { checkAnswer(strDF.select(flatten($"s")), strDFResult) checkAnswer(strDF.selectExpr("flatten(s)"), strDFResult) } // Test with local relation, the Project will be evaluated without codegen testString() // Test with cached relation, the Project will be evaluated with codegen strDF.cache() testString() val arrDF = Seq((1, "a", Seq(1, 2, 3))).toDF("i", "s", "arr") def testArray(): Unit = { checkAnswer( arrDF.selectExpr("flatten(array(arr, array(null, 5), array(6, null)))"), Seq(Row(Seq(1, 2, 3, null, 5, 6, null)))) checkAnswer( arrDF.selectExpr("flatten(array(array(arr, arr), array(arr)))"), Seq(Row(Seq(Seq(1, 2, 3), Seq(1, 2, 3), Seq(1, 2, 3))))) } // Test with local relation, the Project will be evaluated without codegen testArray() // Test with cached relation, the Project will be evaluated with codegen arrDF.cache() testArray() // Error test cases val oneRowDF = Seq((1, "a", Seq(1, 2, 3))).toDF("i", "s", "arr") intercept[AnalysisException] { oneRowDF.select(flatten($"arr")) } intercept[AnalysisException] { oneRowDF.select(flatten($"i")) } intercept[AnalysisException] { oneRowDF.select(flatten($"s")) } intercept[AnalysisException] { oneRowDF.selectExpr("flatten(null)") } } test("array_repeat function") { val strDF = Seq( ("hi", 2), (null, 2) ).toDF("a", "b") val 
strDFTwiceResult = Seq( Row(Seq("hi", "hi")), Row(Seq(null, null)) ) def testString(): Unit = { checkAnswer(strDF.select(array_repeat($"a", 2)), strDFTwiceResult) checkAnswer(strDF.select(array_repeat($"a", $"b")), strDFTwiceResult) checkAnswer(strDF.selectExpr("array_repeat(a, 2)"), strDFTwiceResult) checkAnswer(strDF.selectExpr("array_repeat(a, b)"), strDFTwiceResult) } // Test with local relation, the Project will be evaluated without codegen testString() // Test with cached relation, the Project will be evaluated with codegen strDF.cache() testString() val intDF = { val schema = StructType(Seq( StructField("a", IntegerType), StructField("b", IntegerType))) val data = Seq( Row(3, 2), Row(null, 2) ) spark.createDataFrame(spark.sparkContext.parallelize(data), schema) } val intDFTwiceResult = Seq( Row(Seq(3, 3)), Row(Seq(null, null)) ) def testInt(): Unit = { checkAnswer(intDF.select(array_repeat($"a", 2)), intDFTwiceResult) checkAnswer(intDF.select(array_repeat($"a", $"b")), intDFTwiceResult) checkAnswer(intDF.selectExpr("array_repeat(a, 2)"), intDFTwiceResult) checkAnswer(intDF.selectExpr("array_repeat(a, b)"), intDFTwiceResult) } // Test with local relation, the Project will be evaluated without codegen testInt() // Test with cached relation, the Project will be evaluated with codegen intDF.cache() testInt() val nullCountDF = { val schema = StructType(Seq( StructField("a", StringType), StructField("b", IntegerType))) val data = Seq( Row("hi", null), Row(null, null) ) spark.createDataFrame(spark.sparkContext.parallelize(data), schema) } def testNull(): Unit = { checkAnswer( nullCountDF.select(array_repeat($"a", $"b")), Seq(Row(null), Row(null)) ) } // Test with local relation, the Project will be evaluated without codegen testNull() // Test with cached relation, the Project will be evaluated with codegen nullCountDF.cache() testNull() // Error test cases val invalidTypeDF = Seq(("hi", "1")).toDF("a", "b") intercept[AnalysisException] { invalidTypeDF.select(array_repeat($"a", $"b")) } intercept[AnalysisException] { invalidTypeDF.select(array_repeat($"a", lit("1"))) } intercept[AnalysisException] { invalidTypeDF.selectExpr("array_repeat(a, 1.0)") } } ignore("array remove") { val df = Seq( (Array[Int](2, 1, 2, 3), Array("a", "b", "c", "a"), Array("", ""), 2), (Array.empty[Int], Array.empty[String], Array.empty[String], 2), (null, null, null, 2) ).toDF("a", "b", "c", "d") checkAnswer( df.select(array_remove($"a", 2), array_remove($"b", "a"), array_remove($"c", "")), Seq( Row(Seq(1, 3), Seq("b", "c"), Seq.empty[String]), Row(Seq.empty[Int], Seq.empty[String], Seq.empty[String]), Row(null, null, null)) ) checkAnswer( df.select(array_remove($"a", $"d")), Seq( Row(Seq(1, 3)), Row(Seq.empty[Int]), Row(null)) ) checkAnswer( df.selectExpr("array_remove(a, d)"), Seq( Row(Seq(1, 3)), Row(Seq.empty[Int]), Row(null)) ) checkAnswer( OneRowRelation().selectExpr("array_remove(array(1, 2), 1.23D)"), Seq( Row(Seq(1.0, 2.0)) ) ) checkAnswer( OneRowRelation().selectExpr("array_remove(array(1, 2), 1.0D)"), Seq( Row(Seq(2.0)) ) ) checkAnswer( OneRowRelation().selectExpr("array_remove(array(1.0D, 2.0D), 2)"), Seq( Row(Seq(1.0)) ) ) checkAnswer( OneRowRelation().selectExpr("array_remove(array(1.1D, 1.2D), 1)"), Seq( Row(Seq(1.1, 1.2)) ) ) checkAnswer( df.selectExpr("array_remove(a, 2)", "array_remove(b, \\"a\\")", "array_remove(c, \\"\\")"), Seq( Row(Seq(1, 3), Seq("b", "c"), Seq.empty[String]), Row(Seq.empty[Int], Seq.empty[String], Seq.empty[String]), Row(null, null, null)) ) val e1 = 
intercept[AnalysisException] { Seq(("a string element", "a")).toDF().selectExpr("array_remove(_1, _2)") } val errorMsg1 = s""" |Input to function array_remove should have been array followed by a |value with same element type, but it's [string, string]. """.stripMargin.replace("\\n", " ").trim() assert(e1.message.contains(errorMsg1)) val e2 = intercept[AnalysisException] { OneRowRelation().selectExpr("array_remove(array(1, 2), '1')") } val errorMsg2 = s""" |Input to function array_remove should have been array followed by a |value with same element type, but it's [array<int>, string]. """.stripMargin.replace("\\n", " ").trim() assert(e2.message.contains(errorMsg2)) } test("array_distinct functions") { val df = Seq( (Array[Int](2, 1, 3, 4, 3, 5), Array("b", "c", "a", "c", "b", "", "")), (Array.empty[Int], Array.empty[String]), (null, null) ).toDF("a", "b") checkAnswer( df.select(array_distinct($"a"), array_distinct($"b")), Seq( Row(Seq(2, 1, 3, 4, 5), Seq("b", "c", "a", "")), Row(Seq.empty[Int], Seq.empty[String]), Row(null, null)) ) checkAnswer( df.selectExpr("array_distinct(a)", "array_distinct(b)"), Seq( Row(Seq(2, 1, 3, 4, 5), Seq("b", "c", "a", "")), Row(Seq.empty[Int], Seq.empty[String]), Row(null, null)) ) } // Shuffle expressions should produce same results at retries in the same DataFrame. private def checkShuffleResult(df: DataFrame): Unit = { checkAnswer(df, df.collect()) } ignore("shuffle function - array for primitive type not containing null") { val idfNotContainsNull = Seq( Seq(1, 9, 8, 7), Seq(5, 8, 9, 7, 2), Seq.empty, null ).toDF("i") def testArrayOfPrimitiveTypeNotContainsNull(): Unit = { checkShuffleResult(idfNotContainsNull.select(shuffle($"i"))) checkShuffleResult(idfNotContainsNull.selectExpr("shuffle(i)")) } // Test with local relation, the Project will be evaluated without codegen testArrayOfPrimitiveTypeNotContainsNull() // Test with cached relation, the Project will be evaluated with codegen idfNotContainsNull.cache() testArrayOfPrimitiveTypeNotContainsNull() } ignore("shuffle function - array for primitive type containing null") { val idfContainsNull = Seq[Seq[Integer]]( Seq(1, 9, 8, null, 7), Seq(null, 5, 8, 9, 7, 2), Seq.empty, null ).toDF("i") def testArrayOfPrimitiveTypeContainsNull(): Unit = { checkShuffleResult(idfContainsNull.select(shuffle($"i"))) checkShuffleResult(idfContainsNull.selectExpr("shuffle(i)")) } // Test with local relation, the Project will be evaluated without codegen testArrayOfPrimitiveTypeContainsNull() // Test with cached relation, the Project will be evaluated with codegen idfContainsNull.cache() testArrayOfPrimitiveTypeContainsNull() } ignore("shuffle function - array for non-primitive type") { val sdf = Seq( Seq("c", "a", "b"), Seq("b", null, "c", null), Seq.empty, null ).toDF("s") def testNonPrimitiveType(): Unit = { checkShuffleResult(sdf.select(shuffle($"s"))) checkShuffleResult(sdf.selectExpr("shuffle(s)")) } // Test with local relation, the Project will be evaluated without codegen testNonPrimitiveType() // Test with cached relation, the Project will be evaluated with codegen sdf.cache() testNonPrimitiveType() } test("array_except functions") { val df1 = Seq((Array(1, 2, 4), Array(4, 2))).toDF("a", "b") val ans1 = Row(Seq(1)) checkAnswer(df1.select(array_except($"a", $"b")), ans1) checkAnswer(df1.selectExpr("array_except(a, b)"), ans1) val df2 = Seq((Array[Integer](1, 2, null, 4, 5), Array[Integer](-5, 4, null, 2, -1))) .toDF("a", "b") val ans2 = Row(Seq(1, 5)) checkAnswer(df2.select(array_except($"a", $"b")), ans2) 
checkAnswer(df2.selectExpr("array_except(a, b)"), ans2) val df3 = Seq((Array(1L, 2L, 4L), Array(4L, 2L))).toDF("a", "b") val ans3 = Row(Seq(1L)) checkAnswer(df3.select(array_except($"a", $"b")), ans3) checkAnswer(df3.selectExpr("array_except(a, b)"), ans3) val df4 = Seq( (Array[java.lang.Long](1L, 2L, null, 4L, 5L), Array[java.lang.Long](-5L, 4L, null, 2L, -1L))) .toDF("a", "b") val ans4 = Row(Seq(1L, 5L)) checkAnswer(df4.select(array_except($"a", $"b")), ans4) checkAnswer(df4.selectExpr("array_except(a, b)"), ans4) val df5 = Seq((Array("c", null, "a", "f"), Array("b", null, "a", "g"))).toDF("a", "b") val ans5 = Row(Seq("c", "f")) checkAnswer(df5.select(array_except($"a", $"b")), ans5) checkAnswer(df5.selectExpr("array_except(a, b)"), ans5) val df6 = Seq((null, null)).toDF("a", "b") intercept[AnalysisException] { df6.select(array_except($"a", $"b")) } intercept[AnalysisException] { df6.selectExpr("array_except(a, b)") } val df7 = Seq((Array(1), Array("a"))).toDF("a", "b") intercept[AnalysisException] { df7.select(array_except($"a", $"b")) } intercept[AnalysisException] { df7.selectExpr("array_except(a, b)") } val df8 = Seq((Array("a"), null)).toDF("a", "b") intercept[AnalysisException] { df8.select(array_except($"a", $"b")) } intercept[AnalysisException] { df8.selectExpr("array_except(a, b)") } val df9 = Seq((null, Array("a"))).toDF("a", "b") intercept[AnalysisException] { df9.select(array_except($"a", $"b")) } intercept[AnalysisException] { df9.selectExpr("array_except(a, b)") } val df10 = Seq( (Array[Integer](1, 2), Array[Integer](2)), (Array[Integer](1, 2), Array[Integer](1, null)), (Array[Integer](1, null, 3), Array[Integer](1, 2)), (Array[Integer](1, null), Array[Integer](2, null)) ).toDF("a", "b") val result10 = df10.select(array_except($"a", $"b")) val expectedType10 = ArrayType(IntegerType, containsNull = true) assert(result10.first.schema(0).dataType === expectedType10) } test("array_intersect functions") { val df1 = Seq((Array(1, 2, 4), Array(4, 2))).toDF("a", "b") val ans1 = Row(Seq(2, 4)) checkAnswer(df1.select(array_intersect($"a", $"b")), ans1) checkAnswer(df1.selectExpr("array_intersect(a, b)"), ans1) val df2 = Seq((Array[Integer](1, 2, null, 4, 5), Array[Integer](-5, 4, null, 2, -1))) .toDF("a", "b") val ans2 = Row(Seq(2, null, 4)) checkAnswer(df2.select(array_intersect($"a", $"b")), ans2) checkAnswer(df2.selectExpr("array_intersect(a, b)"), ans2) val df3 = Seq((Array(1L, 2L, 4L), Array(4L, 2L))).toDF("a", "b") val ans3 = Row(Seq(2L, 4L)) checkAnswer(df3.select(array_intersect($"a", $"b")), ans3) checkAnswer(df3.selectExpr("array_intersect(a, b)"), ans3) val df4 = Seq( (Array[java.lang.Long](1L, 2L, null, 4L, 5L), Array[java.lang.Long](-5L, 4L, null, 2L, -1L))) .toDF("a", "b") val ans4 = Row(Seq(2L, null, 4L)) checkAnswer(df4.select(array_intersect($"a", $"b")), ans4) checkAnswer(df4.selectExpr("array_intersect(a, b)"), ans4) val df5 = Seq((Array("c", null, "a", "f"), Array("b", "a", null, "g"))).toDF("a", "b") val ans5 = Row(Seq(null, "a")) checkAnswer(df5.select(array_intersect($"a", $"b")), ans5) checkAnswer(df5.selectExpr("array_intersect(a, b)"), ans5) val df6 = Seq((null, null)).toDF("a", "b") assert(intercept[AnalysisException] { df6.select(array_intersect($"a", $"b")) }.getMessage.contains("data type mismatch")) assert(intercept[AnalysisException] { df6.selectExpr("array_intersect(a, b)") }.getMessage.contains("data type mismatch")) val df7 = Seq((Array(1), Array("a"))).toDF("a", "b") assert(intercept[AnalysisException] { df7.select(array_intersect($"a", $"b")) 
}.getMessage.contains("data type mismatch")) assert(intercept[AnalysisException] { df7.selectExpr("array_intersect(a, b)") }.getMessage.contains("data type mismatch")) val df8 = Seq((null, Array("a"))).toDF("a", "b") assert(intercept[AnalysisException] { df8.select(array_intersect($"a", $"b")) }.getMessage.contains("data type mismatch")) assert(intercept[AnalysisException] { df8.selectExpr("array_intersect(a, b)") }.getMessage.contains("data type mismatch")) } ignore("transform function - array for primitive type not containing null") { val df = Seq( Seq(1, 9, 8, 7), Seq(5, 8, 9, 7, 2), Seq.empty, null ).toDF("i") def testArrayOfPrimitiveTypeNotContainsNull(): Unit = { checkAnswer(df.selectExpr("transform(i, x -> x + 1)"), Seq( Row(Seq(2, 10, 9, 8)), Row(Seq(6, 9, 10, 8, 3)), Row(Seq.empty), Row(null))) checkAnswer(df.selectExpr("transform(i, (x, i) -> x + i)"), Seq( Row(Seq(1, 10, 10, 10)), Row(Seq(5, 9, 11, 10, 6)), Row(Seq.empty), Row(null))) checkAnswer(df.select(transform(col("i"), x => x + 1)), Seq( Row(Seq(2, 10, 9, 8)), Row(Seq(6, 9, 10, 8, 3)), Row(Seq.empty), Row(null))) checkAnswer(df.select(transform(col("i"), (x, i) => x + i)), Seq( Row(Seq(1, 10, 10, 10)), Row(Seq(5, 9, 11, 10, 6)), Row(Seq.empty), Row(null))) } // Test with local relation, the Project will be evaluated without codegen testArrayOfPrimitiveTypeNotContainsNull() // Test with cached relation, the Project will be evaluated with codegen df.cache() testArrayOfPrimitiveTypeNotContainsNull() } ignore("transform function - array for primitive type containing null") { val df = Seq[Seq[Integer]]( Seq(1, 9, 8, null, 7), Seq(5, null, 8, 9, 7, 2), Seq.empty, null ).toDF("i") def testArrayOfPrimitiveTypeContainsNull(): Unit = { checkAnswer(df.selectExpr("transform(i, x -> x + 1)"), Seq( Row(Seq(2, 10, 9, null, 8)), Row(Seq(6, null, 9, 10, 8, 3)), Row(Seq.empty), Row(null))) checkAnswer(df.selectExpr("transform(i, (x, i) -> x + i)"), Seq( Row(Seq(1, 10, 10, null, 11)), Row(Seq(5, null, 10, 12, 11, 7)), Row(Seq.empty), Row(null))) checkAnswer(df.select(transform(col("i"), x => x + 1)), Seq( Row(Seq(2, 10, 9, null, 8)), Row(Seq(6, null, 9, 10, 8, 3)), Row(Seq.empty), Row(null))) checkAnswer(df.select(transform(col("i"), (x, i) => x + i)), Seq( Row(Seq(1, 10, 10, null, 11)), Row(Seq(5, null, 10, 12, 11, 7)), Row(Seq.empty), Row(null))) } // Test with local relation, the Project will be evaluated without codegen testArrayOfPrimitiveTypeContainsNull() // Test with cached relation, the Project will be evaluated with codegen df.cache() testArrayOfPrimitiveTypeContainsNull() } ignore("transform function - array for non-primitive type") { val df = Seq( Seq("c", "a", "b"), Seq("b", null, "c", null), Seq.empty, null ).toDF("s") def testNonPrimitiveType(): Unit = { checkAnswer(df.selectExpr("transform(s, x -> concat(x, x))"), Seq( Row(Seq("cc", "aa", "bb")), Row(Seq("bb", null, "cc", null)), Row(Seq.empty), Row(null))) checkAnswer(df.selectExpr("transform(s, (x, i) -> concat(x, i))"), Seq( Row(Seq("c0", "a1", "b2")), Row(Seq("b0", null, "c2", null)), Row(Seq.empty), Row(null))) checkAnswer(df.select(transform(col("s"), x => concat(x, x))), Seq( Row(Seq("cc", "aa", "bb")), Row(Seq("bb", null, "cc", null)), Row(Seq.empty), Row(null))) checkAnswer(df.select(transform(col("s"), (x, i) => concat(x, i))), Seq( Row(Seq("c0", "a1", "b2")), Row(Seq("b0", null, "c2", null)), Row(Seq.empty), Row(null))) } // Test with local relation, the Project will be evaluated without codegen testNonPrimitiveType() // Test with cached relation, the Project will 
be evaluated with codegen df.cache() testNonPrimitiveType() } ignore("transform function - special cases") { val df = Seq( Seq("c", "a", "b"), Seq("b", null, "c", null), Seq.empty, null ).toDF("arg") def testSpecialCases(): Unit = { checkAnswer(df.selectExpr("transform(arg, arg -> arg)"), Seq( Row(Seq("c", "a", "b")), Row(Seq("b", null, "c", null)), Row(Seq.empty), Row(null))) checkAnswer(df.selectExpr("transform(arg, arg)"), Seq( Row(Seq(Seq("c", "a", "b"), Seq("c", "a", "b"), Seq("c", "a", "b"))), Row(Seq( Seq("b", null, "c", null), Seq("b", null, "c", null), Seq("b", null, "c", null), Seq("b", null, "c", null))), Row(Seq.empty), Row(null))) checkAnswer(df.selectExpr("transform(arg, x -> concat(arg, array(x)))"), Seq( Row(Seq(Seq("c", "a", "b", "c"), Seq("c", "a", "b", "a"), Seq("c", "a", "b", "b"))), Row(Seq( Seq("b", null, "c", null, "b"), Seq("b", null, "c", null, null), Seq("b", null, "c", null, "c"), Seq("b", null, "c", null, null))), Row(Seq.empty), Row(null))) checkAnswer(df.select(transform(col("arg"), arg => arg)), Seq( Row(Seq("c", "a", "b")), Row(Seq("b", null, "c", null)), Row(Seq.empty), Row(null))) checkAnswer(df.select(transform(col("arg"), _ => col("arg"))), Seq( Row(Seq(Seq("c", "a", "b"), Seq("c", "a", "b"), Seq("c", "a", "b"))), Row(Seq( Seq("b", null, "c", null), Seq("b", null, "c", null), Seq("b", null, "c", null), Seq("b", null, "c", null))), Row(Seq.empty), Row(null))) checkAnswer(df.select(transform(col("arg"), x => concat(col("arg"), array(x)))), Seq( Row(Seq(Seq("c", "a", "b", "c"), Seq("c", "a", "b", "a"), Seq("c", "a", "b", "b"))), Row(Seq( Seq("b", null, "c", null, "b"), Seq("b", null, "c", null, null), Seq("b", null, "c", null, "c"), Seq("b", null, "c", null, null))), Row(Seq.empty), Row(null))) } // Test with local relation, the Project will be evaluated without codegen testSpecialCases() // Test with cached relation, the Project will be evaluated with codegen df.cache() testSpecialCases() } test("transform function - invalid") { val df = Seq( (Seq("c", "a", "b"), 1), (Seq("b", null, "c", null), 2), (Seq.empty, 3), (null, 4) ).toDF("s", "i") val ex1 = intercept[AnalysisException] { df.selectExpr("transform(s, (x, y, z) -> x + y + z)") } assert(ex1.getMessage.contains("The number of lambda function arguments '3' does not match")) val ex2 = intercept[AnalysisException] { df.selectExpr("transform(i, x -> x)") } assert(ex2.getMessage.contains("data type mismatch: argument 1 requires array type")) val ex3 = intercept[AnalysisException] { df.selectExpr("transform(a, x -> x)") } assert(ex3.getMessage.contains("cannot resolve '`a`'")) } test("map_filter") { val dfInts = Seq( Map(1 -> 10, 2 -> 20, 3 -> 30), Map(1 -> -1, 2 -> -2, 3 -> -3), Map(1 -> 10, 2 -> 5, 3 -> -3)).toDF("m") checkAnswer(dfInts.selectExpr( "map_filter(m, (k, v) -> k * 10 = v)", "map_filter(m, (k, v) -> k = -v)"), Seq( Row(Map(1 -> 10, 2 -> 20, 3 -> 30), Map()), Row(Map(), Map(1 -> -1, 2 -> -2, 3 -> -3)), Row(Map(1 -> 10), Map(3 -> -3)))) checkAnswer(dfInts.select( map_filter(col("m"), (k, v) => k * 10 === v), map_filter(col("m"), (k, v) => k === (v * -1))), Seq( Row(Map(1 -> 10, 2 -> 20, 3 -> 30), Map()), Row(Map(), Map(1 -> -1, 2 -> -2, 3 -> -3)), Row(Map(1 -> 10), Map(3 -> -3)))) val dfComplex = Seq( Map(1 -> Seq(Some(1)), 2 -> Seq(Some(1), Some(2)), 3 -> Seq(Some(1), Some(2), Some(3))), Map(1 -> null, 2 -> Seq(Some(-2), Some(-2)), 3 -> Seq[Option[Int]](None))).toDF("m") checkAnswer(dfComplex.selectExpr( "map_filter(m, (k, v) -> k = v[0])", "map_filter(m, (k, v) -> k = size(v))"), Seq( 
Row(Map(1 -> Seq(1)), Map(1 -> Seq(1), 2 -> Seq(1, 2), 3 -> Seq(1, 2, 3))), Row(Map(), Map(2 -> Seq(-2, -2))))) checkAnswer(dfComplex.select( map_filter(col("m"), (k, v) => k === element_at(v, 1)), map_filter(col("m"), (k, v) => k === size(v))), Seq( Row(Map(1 -> Seq(1)), Map(1 -> Seq(1), 2 -> Seq(1, 2), 3 -> Seq(1, 2, 3))), Row(Map(), Map(2 -> Seq(-2, -2))))) // Invalid use cases val df = Seq( (Map(1 -> "a"), 1), (Map.empty[Int, String], 2), (null, 3) ).toDF("s", "i") val ex1 = intercept[AnalysisException] { df.selectExpr("map_filter(s, (x, y, z) -> x + y + z)") } assert(ex1.getMessage.contains("The number of lambda function arguments '3' does not match")) val ex2 = intercept[AnalysisException] { df.selectExpr("map_filter(s, x -> x)") } assert(ex2.getMessage.contains("The number of lambda function arguments '1' does not match")) val ex3 = intercept[AnalysisException] { df.selectExpr("map_filter(i, (k, v) -> k > v)") } assert(ex3.getMessage.contains("data type mismatch: argument 1 requires map type")) val ex3a = intercept[AnalysisException] { df.select(map_filter(col("i"), (k, v) => k > v)) } assert(ex3a.getMessage.contains("data type mismatch: argument 1 requires map type")) val ex4 = intercept[AnalysisException] { df.selectExpr("map_filter(a, (k, v) -> k > v)") } assert(ex4.getMessage.contains("cannot resolve '`a`'")) } ignore("filter function - array for primitive type not containing null") { val df = Seq( Seq(1, 9, 8, 7), Seq(5, 8, 9, 7, 2), Seq.empty, null ).toDF("i") def testArrayOfPrimitiveTypeNotContainsNull(): Unit = { checkAnswer(df.selectExpr("filter(i, x -> x % 2 == 0)"), Seq( Row(Seq(8)), Row(Seq(8, 2)), Row(Seq.empty), Row(null))) checkAnswer(df.select(filter(col("i"), _ % 2 === 0)), Seq( Row(Seq(8)), Row(Seq(8, 2)), Row(Seq.empty), Row(null))) } // Test with local relation, the Project will be evaluated without codegen testArrayOfPrimitiveTypeNotContainsNull() // Test with cached relation, the Project will be evaluated with codegen df.cache() testArrayOfPrimitiveTypeNotContainsNull() } ignore("filter function - array for primitive type containing null") { val df = Seq[Seq[Integer]]( Seq(1, 9, 8, null, 7), Seq(5, null, 8, 9, 7, 2), Seq.empty, null ).toDF("i") def testArrayOfPrimitiveTypeContainsNull(): Unit = { checkAnswer(df.selectExpr("filter(i, x -> x % 2 == 0)"), Seq( Row(Seq(8)), Row(Seq(8, 2)), Row(Seq.empty), Row(null))) checkAnswer(df.select(filter(col("i"), _ % 2 === 0)), Seq( Row(Seq(8)), Row(Seq(8, 2)), Row(Seq.empty), Row(null))) } // Test with local relation, the Project will be evaluated without codegen testArrayOfPrimitiveTypeContainsNull() // Test with cached relation, the Project will be evaluated with codegen df.cache() testArrayOfPrimitiveTypeContainsNull() } ignore("filter function - array for non-primitive type") { val df = Seq( Seq("c", "a", "b"), Seq("b", null, "c", null), Seq.empty, null ).toDF("s") def testNonPrimitiveType(): Unit = { checkAnswer(df.selectExpr("filter(s, x -> x is not null)"), Seq( Row(Seq("c", "a", "b")), Row(Seq("b", "c")), Row(Seq.empty), Row(null))) checkAnswer(df.select(filter(col("s"), x => x.isNotNull)), Seq( Row(Seq("c", "a", "b")), Row(Seq("b", "c")), Row(Seq.empty), Row(null))) } // Test with local relation, the Project will be evaluated without codegen testNonPrimitiveType() // Test with cached relation, the Project will be evaluated with codegen df.cache() testNonPrimitiveType() } ignore("filter function - index argument") { val df = Seq( Seq("c", "a", "b"), Seq("b", null, "c", null), Seq.empty, null ).toDF("s") def 
testIndexArgument(): Unit = { checkAnswer(df.selectExpr("filter(s, (x, i) -> i % 2 == 0)"), Seq( Row(Seq("c", "b")), Row(Seq("b", "c")), Row(Seq.empty), Row(null))) checkAnswer(df.select(filter(col("s"), (x, i) => i % 2 === 0)), Seq( Row(Seq("c", "b")), Row(Seq("b", "c")), Row(Seq.empty), Row(null))) } // Test with local relation, the Project will be evaluated without codegen testIndexArgument() // Test with cached relation, the Project will be evaluated with codegen df.cache() testIndexArgument() } test("filter function - invalid") { val df = Seq( (Seq("c", "a", "b"), 1), (Seq("b", null, "c", null), 2), (Seq.empty, 3), (null, 4) ).toDF("s", "i") val ex1 = intercept[AnalysisException] { df.selectExpr("filter(s, (x, y, z) -> x + y)") } assert(ex1.getMessage.contains("The number of lambda function arguments '3' does not match")) val ex2 = intercept[AnalysisException] { df.selectExpr("filter(i, x -> x)") } assert(ex2.getMessage.contains("data type mismatch: argument 1 requires array type")) val ex2a = intercept[AnalysisException] { df.select(filter(col("i"), x => x)) } assert(ex2a.getMessage.contains("data type mismatch: argument 1 requires array type")) val ex3 = intercept[AnalysisException] { df.selectExpr("filter(s, x -> x)") } assert(ex3.getMessage.contains("data type mismatch: argument 2 requires boolean type")) val ex3a = intercept[AnalysisException] { df.select(filter(col("s"), x => x)) } assert(ex3a.getMessage.contains("data type mismatch: argument 2 requires boolean type")) val ex4 = intercept[AnalysisException] { df.selectExpr("filter(a, x -> x)") } assert(ex4.getMessage.contains("cannot resolve '`a`'")) } ignore("exists function - array for primitive type not containing null") { val df = Seq( Seq(1, 9, 8, 7), Seq(5, 9, 7), Seq.empty, null ).toDF("i") def testArrayOfPrimitiveTypeNotContainsNull(): Unit = { checkAnswer(df.selectExpr("exists(i, x -> x % 2 == 0)"), Seq( Row(true), Row(false), Row(false), Row(null))) checkAnswer(df.select(exists(col("i"), _ % 2 === 0)), Seq( Row(true), Row(false), Row(false), Row(null))) } // Test with local relation, the Project will be evaluated without codegen testArrayOfPrimitiveTypeNotContainsNull() // Test with cached relation, the Project will be evaluated with codegen df.cache() testArrayOfPrimitiveTypeNotContainsNull() } ignore("exists function - array for primitive type containing null") { val df = Seq[Seq[Integer]]( Seq(1, 9, 8, null, 7), Seq(1, 3, 5), Seq(5, null, null, 9, 7, null), Seq.empty, null ).toDF("i") def testArrayOfPrimitiveTypeContainsNull(): Unit = { checkAnswer(df.selectExpr("exists(i, x -> x % 2 == 0)"), Seq( Row(true), Row(false), Row(null), Row(false), Row(null))) checkAnswer(df.select(exists(col("i"), _ % 2 === 0)), Seq( Row(true), Row(false), Row(null), Row(false), Row(null))) } // Test with local relation, the Project will be evaluated without codegen testArrayOfPrimitiveTypeContainsNull() // Test with cached relation, the Project will be evaluated with codegen df.cache() testArrayOfPrimitiveTypeContainsNull() } ignore("exists function - array for non-primitive type") { val df = Seq( Seq("c", "a", "b"), Seq("b", null, "c", null), Seq.empty, null ).toDF("s") def testNonPrimitiveType(): Unit = { checkAnswer(df.selectExpr("exists(s, x -> x is null)"), Seq( Row(false), Row(true), Row(false), Row(null))) checkAnswer(df.select(exists(col("s"), x => x.isNull)), Seq( Row(false), Row(true), Row(false), Row(null))) } // Test with local relation, the Project will be evaluated without codegen testNonPrimitiveType() // Test with cached 
relation, the Project will be evaluated with codegen df.cache() testNonPrimitiveType() } test("exists function - invalid") { val df = Seq( (Seq("c", "a", "b"), 1), (Seq("b", null, "c", null), 2), (Seq.empty, 3), (null, 4) ).toDF("s", "i") val ex1 = intercept[AnalysisException] { df.selectExpr("exists(s, (x, y) -> x + y)") } assert(ex1.getMessage.contains("The number of lambda function arguments '2' does not match")) val ex2 = intercept[AnalysisException] { df.selectExpr("exists(i, x -> x)") } assert(ex2.getMessage.contains("data type mismatch: argument 1 requires array type")) val ex2a = intercept[AnalysisException] { df.select(exists(col("i"), x => x)) } assert(ex2.getMessage.contains("data type mismatch: argument 1 requires array type")) val ex3 = intercept[AnalysisException] { df.selectExpr("exists(s, x -> x)") } assert(ex3.getMessage.contains("data type mismatch: argument 2 requires boolean type")) val ex3a = intercept[AnalysisException] { df.select(exists(df("s"), x => x)) } assert(ex3a.getMessage.contains("data type mismatch: argument 2 requires boolean type")) val ex4 = intercept[AnalysisException] { df.selectExpr("exists(a, x -> x)") } assert(ex4.getMessage.contains("cannot resolve '`a`'")) } ignore("forall function - array for primitive type not containing null") { val df = Seq( Seq(1, 9, 8, 7), Seq(2, 4, 6), Seq.empty, null ).toDF("i") def testArrayOfPrimitiveTypeNotContainsNull(): Unit = { checkAnswer(df.selectExpr("forall(i, x -> x % 2 == 0)"), Seq( Row(false), Row(true), Row(true), Row(null))) checkAnswer(df.select(forall(col("i"), x => x % 2 === 0)), Seq( Row(false), Row(true), Row(true), Row(null))) } // Test with local relation, the Project will be evaluated without codegen testArrayOfPrimitiveTypeNotContainsNull() // Test with cached relation, the Project will be evaluated with codegen df.cache() testArrayOfPrimitiveTypeNotContainsNull() } ignore("forall function - array for primitive type containing null") { val df = Seq[Seq[Integer]]( Seq(1, 9, 8, null, 7), Seq(2, null, null, 4, 6, null), Seq(2, 4, 6, 8), Seq.empty, null ).toDF("i") def testArrayOfPrimitiveTypeContainsNull(): Unit = { checkAnswer(df.selectExpr("forall(i, x -> x % 2 == 0 or x is null)"), Seq( Row(false), Row(true), Row(true), Row(true), Row(null))) checkAnswer(df.select(forall(col("i"), x => (x % 2 === 0) || x.isNull)), Seq( Row(false), Row(true), Row(true), Row(true), Row(null))) checkAnswer(df.selectExpr("forall(i, x -> x % 2 == 0)"), Seq( Row(false), Row(null), Row(true), Row(true), Row(null))) checkAnswer(df.select(forall(col("i"), x => x % 2 === 0)), Seq( Row(false), Row(null), Row(true), Row(true), Row(null))) } // Test with local relation, the Project will be evaluated without codegen testArrayOfPrimitiveTypeContainsNull() // Test with cached relation, the Project will be evaluated with codegen df.cache() testArrayOfPrimitiveTypeContainsNull() } ignore("forall function - array for non-primitive type") { val df = Seq( Seq("c", "a", "b"), Seq[String](null, null, null, null), Seq.empty, null ).toDF("s") def testNonPrimitiveType(): Unit = { checkAnswer(df.selectExpr("forall(s, x -> x is null)"), Seq( Row(false), Row(true), Row(true), Row(null))) checkAnswer(df.select(forall(col("s"), _.isNull)), Seq( Row(false), Row(true), Row(true), Row(null))) } // Test with local relation, the Project will be evaluated without codegen testNonPrimitiveType() // Test with cached relation, the Project will be evaluated with codegen df.cache() testNonPrimitiveType() } test("forall function - invalid") { val df = Seq( 
(Seq("c", "a", "b"), 1), (Seq("b", null, "c", null), 2), (Seq.empty, 3), (null, 4) ).toDF("s", "i") val ex1 = intercept[AnalysisException] { df.selectExpr("forall(s, (x, y) -> x + y)") } assert(ex1.getMessage.contains("The number of lambda function arguments '2' does not match")) val ex2 = intercept[AnalysisException] { df.selectExpr("forall(i, x -> x)") } assert(ex2.getMessage.contains("data type mismatch: argument 1 requires array type")) val ex2a = intercept[AnalysisException] { df.select(forall(col("i"), x => x)) } assert(ex2a.getMessage.contains("data type mismatch: argument 1 requires array type")) val ex3 = intercept[AnalysisException] { df.selectExpr("forall(s, x -> x)") } assert(ex3.getMessage.contains("data type mismatch: argument 2 requires boolean type")) val ex3a = intercept[AnalysisException] { df.select(forall(col("s"), x => x)) } assert(ex3a.getMessage.contains("data type mismatch: argument 2 requires boolean type")) val ex4 = intercept[AnalysisException] { df.selectExpr("forall(a, x -> x)") } assert(ex4.getMessage.contains("cannot resolve '`a`'")) val ex4a = intercept[AnalysisException] { df.select(forall(col("a"), x => x)) } assert(ex4a.getMessage.contains("cannot resolve '`a`'")) } ignore("aggregate function - array for primitive type not containing null") { val df = Seq( Seq(1, 9, 8, 7), Seq(5, 8, 9, 7, 2), Seq.empty, null ).toDF("i") def testArrayOfPrimitiveTypeNotContainsNull(): Unit = { checkAnswer(df.selectExpr("aggregate(i, 0, (acc, x) -> acc + x)"), Seq( Row(25), Row(31), Row(0), Row(null))) checkAnswer(df.selectExpr("aggregate(i, 0, (acc, x) -> acc + x, acc -> acc * 10)"), Seq( Row(250), Row(310), Row(0), Row(null))) checkAnswer(df.select(aggregate(col("i"), lit(0), (acc, x) => acc + x)), Seq( Row(25), Row(31), Row(0), Row(null))) checkAnswer(df.select(aggregate(col("i"), lit(0), (acc, x) => acc + x, _ * 10)), Seq( Row(250), Row(310), Row(0), Row(null))) } // Test with local relation, the Project will be evaluated without codegen testArrayOfPrimitiveTypeNotContainsNull() // Test with cached relation, the Project will be evaluated with codegen df.cache() testArrayOfPrimitiveTypeNotContainsNull() } ignore("aggregate function - array for primitive type containing null") { val df = Seq[Seq[Integer]]( Seq(1, 9, 8, 7), Seq(5, null, 8, 9, 7, 2), Seq.empty, null ).toDF("i") def testArrayOfPrimitiveTypeContainsNull(): Unit = { checkAnswer(df.selectExpr("aggregate(i, 0, (acc, x) -> acc + x)"), Seq( Row(25), Row(null), Row(0), Row(null))) checkAnswer( df.selectExpr("aggregate(i, 0, (acc, x) -> acc + x, acc -> coalesce(acc, 0) * 10)"), Seq( Row(250), Row(0), Row(0), Row(null))) checkAnswer(df.select(aggregate(col("i"), lit(0), (acc, x) => acc + x)), Seq( Row(25), Row(null), Row(0), Row(null))) checkAnswer( df.select( aggregate(col("i"), lit(0), (acc, x) => acc + x, acc => coalesce(acc, lit(0)) * 10)), Seq( Row(250), Row(0), Row(0), Row(null))) } // Test with local relation, the Project will be evaluated without codegen testArrayOfPrimitiveTypeContainsNull() // Test with cached relation, the Project will be evaluated with codegen df.cache() testArrayOfPrimitiveTypeContainsNull() } ignore("aggregate function - array for non-primitive type") { val df = Seq( (Seq("c", "a", "b"), "a"), (Seq("b", null, "c", null), "b"), (Seq.empty, "c"), (null, "d") ).toDF("ss", "s") def testNonPrimitiveType(): Unit = { checkAnswer(df.selectExpr("aggregate(ss, s, (acc, x) -> concat(acc, x))"), Seq( Row("acab"), Row(null), Row("c"), Row(null))) checkAnswer( df.selectExpr("aggregate(ss, s, (acc, x) 
-> concat(acc, x), acc -> coalesce(acc , ''))"), Seq( Row("acab"), Row(""), Row("c"), Row(null))) checkAnswer(df.select(aggregate(col("ss"), col("s"), (acc, x) => concat(acc, x))), Seq( Row("acab"), Row(null), Row("c"), Row(null))) checkAnswer( df.select( aggregate(col("ss"), col("s"), (acc, x) => concat(acc, x), acc => coalesce(acc, lit("")))), Seq( Row("acab"), Row(""), Row("c"), Row(null))) } // Test with local relation, the Project will be evaluated without codegen testNonPrimitiveType() // Test with cached relation, the Project will be evaluated with codegen df.cache() testNonPrimitiveType() } test("aggregate function - invalid") { val df = Seq( (Seq("c", "a", "b"), 1), (Seq("b", null, "c", null), 2), (Seq.empty, 3), (null, 4) ).toDF("s", "i") val ex1 = intercept[AnalysisException] { df.selectExpr("aggregate(s, '', x -> x)") } assert(ex1.getMessage.contains("The number of lambda function arguments '1' does not match")) val ex2 = intercept[AnalysisException] { df.selectExpr("aggregate(s, '', (acc, x) -> x, (acc, x) -> x)") } assert(ex2.getMessage.contains("The number of lambda function arguments '2' does not match")) val ex3 = intercept[AnalysisException] { df.selectExpr("aggregate(i, 0, (acc, x) -> x)") } assert(ex3.getMessage.contains("data type mismatch: argument 1 requires array type")) val ex3a = intercept[AnalysisException] { df.select(aggregate(col("i"), lit(0), (acc, x) => x)) } assert(ex3a.getMessage.contains("data type mismatch: argument 1 requires array type")) val ex4 = intercept[AnalysisException] { df.selectExpr("aggregate(s, 0, (acc, x) -> x)") } assert(ex4.getMessage.contains("data type mismatch: argument 3 requires int type")) val ex4a = intercept[AnalysisException] { df.select(aggregate(col("s"), lit(0), (acc, x) => x)) } assert(ex4a.getMessage.contains("data type mismatch: argument 3 requires int type")) val ex5 = intercept[AnalysisException] { df.selectExpr("aggregate(a, 0, (acc, x) -> x)") } assert(ex5.getMessage.contains("cannot resolve '`a`'")) } test("map_zip_with function - map of primitive types") { val df = Seq( (Map(8 -> 6L, 3 -> 5L, 6 -> 2L), Map[Integer, Integer]((6, 4), (8, 2), (3, 2))), (Map(10 -> 6L, 8 -> 3L), Map[Integer, Integer]((8, 4), (4, null))), (Map.empty[Int, Long], Map[Integer, Integer]((5, 1))), (Map(5 -> 1L), null) ).toDF("m1", "m2") checkAnswer(df.selectExpr("map_zip_with(m1, m2, (k, v1, v2) -> k == v1 + v2)"), Seq( Row(Map(8 -> true, 3 -> false, 6 -> true)), Row(Map(10 -> null, 8 -> false, 4 -> null)), Row(Map(5 -> null)), Row(null))) checkAnswer(df.select(map_zip_with(df("m1"), df("m2"), (k, v1, v2) => k === v1 + v2)), Seq( Row(Map(8 -> true, 3 -> false, 6 -> true)), Row(Map(10 -> null, 8 -> false, 4 -> null)), Row(Map(5 -> null)), Row(null))) } test("map_zip_with function - map of non-primitive types") { val df = Seq( (Map("z" -> "a", "y" -> "b", "x" -> "c"), Map("x" -> "a", "z" -> "c")), (Map("b" -> "a", "c" -> "d"), Map("c" -> "a", "b" -> null, "d" -> "k")), (Map("a" -> "d"), Map.empty[String, String]), (Map("a" -> "d"), null) ).toDF("m1", "m2") checkAnswer(df.selectExpr("map_zip_with(m1, m2, (k, v1, v2) -> (v1, v2))"), Seq( Row(Map("z" -> Row("a", "c"), "y" -> Row("b", null), "x" -> Row("c", "a"))), Row(Map("b" -> Row("a", null), "c" -> Row("d", "a"), "d" -> Row(null, "k"))), Row(Map("a" -> Row("d", null))), Row(null))) checkAnswer(df.select(map_zip_with(col("m1"), col("m2"), (k, v1, v2) => struct(v1, v2))), Seq( Row(Map("z" -> Row("a", "c"), "y" -> Row("b", null), "x" -> Row("c", "a"))), Row(Map("b" -> Row("a", null), "c" -> Row("d", 
"a"), "d" -> Row(null, "k"))), Row(Map("a" -> Row("d", null))), Row(null))) } test("map_zip_with function - invalid") { val df = Seq( (Map(1 -> 2), Map(1 -> "a"), Map("a" -> "b"), Map(Map(1 -> 2) -> 2), 1) ).toDF("mii", "mis", "mss", "mmi", "i") val ex1 = intercept[AnalysisException] { df.selectExpr("map_zip_with(mii, mis, (x, y) -> x + y)") } assert(ex1.getMessage.contains("The number of lambda function arguments '2' does not match")) val ex2 = intercept[AnalysisException] { df.selectExpr("map_zip_with(mis, mmi, (x, y, z) -> concat(x, y, z))") } assert(ex2.getMessage.contains("The input to function map_zip_with should have " + "been two maps with compatible key types")) val ex2a = intercept[AnalysisException] { df.select(map_zip_with(df("mis"), col("mmi"), (x, y, z) => concat(x, y, z))) } assert(ex2a.getMessage.contains("The input to function map_zip_with should have " + "been two maps with compatible key types")) val ex3 = intercept[AnalysisException] { df.selectExpr("map_zip_with(i, mis, (x, y, z) -> concat(x, y, z))") } assert(ex3.getMessage.contains("type mismatch: argument 1 requires map type")) val ex3a = intercept[AnalysisException] { df.select(map_zip_with(col("i"), col("mis"), (x, y, z) => concat(x, y, z))) } assert(ex3a.getMessage.contains("type mismatch: argument 1 requires map type")) val ex4 = intercept[AnalysisException] { df.selectExpr("map_zip_with(mis, i, (x, y, z) -> concat(x, y, z))") } assert(ex4.getMessage.contains("type mismatch: argument 2 requires map type")) val ex4a = intercept[AnalysisException] { df.select(map_zip_with(col("mis"), col("i"), (x, y, z) => concat(x, y, z))) } assert(ex4a.getMessage.contains("type mismatch: argument 2 requires map type")) val ex5 = intercept[AnalysisException] { df.selectExpr("map_zip_with(mmi, mmi, (x, y, z) -> x)") } assert(ex5.getMessage.contains("function map_zip_with does not support ordering on type map")) } ignore("transform keys function - primitive data types") { val dfExample1 = Seq( Map[Int, Int](1 -> 1, 9 -> 9, 8 -> 8, 7 -> 7) ).toDF("i") val dfExample2 = Seq( Map[Int, Double](1 -> 1.0, 2 -> 1.40, 3 -> 1.70) ).toDF("j") val dfExample3 = Seq( Map[Int, Boolean](25 -> true, 26 -> false) ).toDF("x") val dfExample4 = Seq( Map[Array[Int], Boolean](Array(1, 2) -> false) ).toDF("y") def testMapOfPrimitiveTypesCombination(): Unit = { checkAnswer(dfExample1.selectExpr("transform_keys(i, (k, v) -> k + v)"), Seq(Row(Map(2 -> 1, 18 -> 9, 16 -> 8, 14 -> 7)))) checkAnswer(dfExample1.select(transform_keys(col("i"), (k, v) => k + v)), Seq(Row(Map(2 -> 1, 18 -> 9, 16 -> 8, 14 -> 7)))) checkAnswer(dfExample2.selectExpr("transform_keys(j, " + "(k, v) -> map_from_arrays(ARRAY(1, 2, 3), ARRAY('one', 'two', 'three'))[k])"), Seq(Row(Map("one" -> 1.0, "two" -> 1.4, "three" -> 1.7)))) checkAnswer(dfExample2.select( transform_keys( col("j"), (k, v) => element_at( map_from_arrays( array(lit(1), lit(2), lit(3)), array(lit("one"), lit("two"), lit("three")) ), k ) ) ), Seq(Row(Map("one" -> 1.0, "two" -> 1.4, "three" -> 1.7)))) checkAnswer(dfExample2.selectExpr("transform_keys(j, (k, v) -> CAST(v * 2 AS BIGINT) + k)"), Seq(Row(Map(3 -> 1.0, 4 -> 1.4, 6 -> 1.7)))) checkAnswer(dfExample2.select(transform_keys(col("j"), (k, v) => (v * 2).cast("bigint") + k)), Seq(Row(Map(3 -> 1.0, 4 -> 1.4, 6 -> 1.7)))) checkAnswer(dfExample2.selectExpr("transform_keys(j, (k, v) -> k + v)"), Seq(Row(Map(2.0 -> 1.0, 3.4 -> 1.4, 4.7 -> 1.7)))) checkAnswer(dfExample2.select(transform_keys(col("j"), (k, v) => k + v)), Seq(Row(Map(2.0 -> 1.0, 3.4 -> 1.4, 4.7 -> 1.7)))) 
intercept[SparkException] { dfExample3.selectExpr("transform_keys(x, (k, v) -> k % 2 = 0 OR v)").collect() } intercept[SparkException] { dfExample3.select(transform_keys(col("x"), (k, v) => k % 2 === 0 || v)).collect() } withSQLConf(SQLConf.MAP_KEY_DEDUP_POLICY.key -> SQLConf.MapKeyDedupPolicy.LAST_WIN.toString) { checkAnswer(dfExample3.selectExpr("transform_keys(x, (k, v) -> k % 2 = 0 OR v)"), Seq(Row(Map(true -> true, true -> false)))) checkAnswer(dfExample3.select(transform_keys(col("x"), (k, v) => k % 2 === 0 || v)), Seq(Row(Map(true -> true, true -> false)))) } checkAnswer(dfExample3.selectExpr("transform_keys(x, (k, v) -> if(v, 2 * k, 3 * k))"), Seq(Row(Map(50 -> true, 78 -> false)))) checkAnswer(dfExample3.select(transform_keys(col("x"), (k, v) => when(v, k * 2).otherwise(k * 3))), Seq(Row(Map(50 -> true, 78 -> false)))) checkAnswer(dfExample4.selectExpr("transform_keys(y, (k, v) -> array_contains(k, 3) AND v)"), Seq(Row(Map(false -> false)))) checkAnswer(dfExample4.select(transform_keys(col("y"), (k, v) => array_contains(k, lit(3)) && v)), Seq(Row(Map(false -> false)))) } // Test with local relation, the Project will be evaluated without codegen testMapOfPrimitiveTypesCombination() dfExample1.cache() dfExample2.cache() dfExample3.cache() dfExample4.cache() // Test with cached relation, the Project will be evaluated with codegen testMapOfPrimitiveTypesCombination() } test("transform keys function - Invalid lambda functions and exceptions") { val dfExample1 = Seq( Map[String, String]("a" -> null) ).toDF("i") val dfExample2 = Seq( Seq(1, 2, 3, 4) ).toDF("j") val ex1 = intercept[AnalysisException] { dfExample1.selectExpr("transform_keys(i, k -> k)") } assert(ex1.getMessage.contains("The number of lambda function arguments '1' does not match")) val ex2 = intercept[AnalysisException] { dfExample1.selectExpr("transform_keys(i, (k, v, x) -> k + 1)") } assert(ex2.getMessage.contains( "The number of lambda function arguments '3' does not match")) val ex3 = intercept[Exception] { dfExample1.selectExpr("transform_keys(i, (k, v) -> v)").show() } assert(ex3.getMessage.contains("Cannot use null as map key")) val ex3a = intercept[Exception] { dfExample1.select(transform_keys(col("i"), (k, v) => v)).show() } assert(ex3a.getMessage.contains("Cannot use null as map key")) val ex4 = intercept[AnalysisException] { dfExample2.selectExpr("transform_keys(j, (k, v) -> k + 1)") } assert(ex4.getMessage.contains( "data type mismatch: argument 1 requires map type")) } ignore("transform values function - test primitive data types") { val dfExample1 = Seq( Map[Int, Int](1 -> 1, 9 -> 9, 8 -> 8, 7 -> 7) ).toDF("i") val dfExample2 = Seq( Map[Boolean, String](false -> "abc", true -> "def") ).toDF("x") val dfExample3 = Seq( Map[String, Int]("a" -> 1, "b" -> 2, "c" -> 3) ).toDF("y") val dfExample4 = Seq( Map[Int, Double](1 -> 1.0, 2 -> 1.40, 3 -> 1.70) ).toDF("z") val dfExample5 = Seq( Map[Int, Array[Int]](1 -> Array(1, 2)) ).toDF("c") def testMapOfPrimitiveTypesCombination(): Unit = { checkAnswer(dfExample1.selectExpr("transform_values(i, (k, v) -> k + v)"), Seq(Row(Map(1 -> 2, 9 -> 18, 8 -> 16, 7 -> 14)))) checkAnswer(dfExample2.selectExpr( "transform_values(x, (k, v) -> if(k, v, CAST(k AS String)))"), Seq(Row(Map(false -> "false", true -> "def")))) checkAnswer(dfExample2.selectExpr("transform_values(x, (k, v) -> NOT k AND v = 'abc')"), Seq(Row(Map(false -> true, true -> false)))) checkAnswer(dfExample3.selectExpr("transform_values(y, (k, v) -> v * v)"), Seq(Row(Map("a" -> 1, "b" -> 4, "c" -> 9)))) 
checkAnswer(dfExample3.selectExpr( "transform_values(y, (k, v) -> k || ':' || CAST(v as String))"), Seq(Row(Map("a" -> "a:1", "b" -> "b:2", "c" -> "c:3")))) checkAnswer( dfExample3.selectExpr("transform_values(y, (k, v) -> concat(k, cast(v as String)))"), Seq(Row(Map("a" -> "a1", "b" -> "b2", "c" -> "c3")))) checkAnswer( dfExample4.selectExpr( "transform_values(" + "z,(k, v) -> map_from_arrays(ARRAY(1, 2, 3), " + "ARRAY('one', 'two', 'three'))[k] || '_' || CAST(v AS String))"), Seq(Row(Map(1 -> "one_1.0", 2 -> "two_1.4", 3 ->"three_1.7")))) checkAnswer( dfExample4.selectExpr("transform_values(z, (k, v) -> k-v)"), Seq(Row(Map(1 -> 0.0, 2 -> 0.6000000000000001, 3 -> 1.3)))) checkAnswer( dfExample5.selectExpr("transform_values(c, (k, v) -> k + cardinality(v))"), Seq(Row(Map(1 -> 3)))) checkAnswer(dfExample1.select(transform_values(col("i"), (k, v) => k + v)), Seq(Row(Map(1 -> 2, 9 -> 18, 8 -> 16, 7 -> 14)))) checkAnswer(dfExample2.select( transform_values(col("x"), (k, v) => when(k, v).otherwise(k.cast("string")))), Seq(Row(Map(false -> "false", true -> "def")))) checkAnswer(dfExample2.select(transform_values(col("x"), (k, v) => (!k) && v === "abc")), Seq(Row(Map(false -> true, true -> false)))) checkAnswer(dfExample3.select(transform_values(col("y"), (k, v) => v * v)), Seq(Row(Map("a" -> 1, "b" -> 4, "c" -> 9)))) checkAnswer(dfExample3.select( transform_values(col("y"), (k, v) => concat(k, lit(":"), v.cast("string")))), Seq(Row(Map("a" -> "a:1", "b" -> "b:2", "c" -> "c:3")))) checkAnswer( dfExample3.select(transform_values(col("y"), (k, v) => concat(k, v.cast("string")))), Seq(Row(Map("a" -> "a1", "b" -> "b2", "c" -> "c3")))) val testMap = map_from_arrays( array(lit(1), lit(2), lit(3)), array(lit("one"), lit("two"), lit("three")) ) checkAnswer( dfExample4.select(transform_values(col("z"), (k, v) => concat(element_at(testMap, k), lit("_"), v.cast("string")))), Seq(Row(Map(1 -> "one_1.0", 2 -> "two_1.4", 3 ->"three_1.7")))) checkAnswer( dfExample4.select(transform_values(col("z"), (k, v) => k - v)), Seq(Row(Map(1 -> 0.0, 2 -> 0.6000000000000001, 3 -> 1.3)))) checkAnswer( dfExample5.select(transform_values(col("c"), (k, v) => k + size(v))), Seq(Row(Map(1 -> 3)))) } // Test with local relation, the Project will be evaluated without codegen testMapOfPrimitiveTypesCombination() dfExample1.cache() dfExample2.cache() dfExample3.cache() dfExample4.cache() dfExample5.cache() // Test with cached relation, the Project will be evaluated with codegen testMapOfPrimitiveTypesCombination() } ignore("transform values function - test empty") { val dfExample1 = Seq( Map.empty[Integer, Integer] ).toDF("i") val dfExample2 = Seq( Map.empty[BigInt, String] ).toDF("j") def testEmpty(): Unit = { checkAnswer(dfExample1.selectExpr("transform_values(i, (k, v) -> NULL)"), Seq(Row(Map.empty[Integer, Integer]))) checkAnswer(dfExample1.selectExpr("transform_values(i, (k, v) -> k)"), Seq(Row(Map.empty[Integer, Integer]))) checkAnswer(dfExample1.selectExpr("transform_values(i, (k, v) -> v)"), Seq(Row(Map.empty[Integer, Integer]))) checkAnswer(dfExample1.selectExpr("transform_values(i, (k, v) -> 0)"), Seq(Row(Map.empty[Integer, Integer]))) checkAnswer(dfExample1.selectExpr("transform_values(i, (k, v) -> 'value')"), Seq(Row(Map.empty[Integer, String]))) checkAnswer(dfExample1.selectExpr("transform_values(i, (k, v) -> true)"), Seq(Row(Map.empty[Integer, Boolean]))) checkAnswer(dfExample2.selectExpr("transform_values(j, (k, v) -> k + cast(v as BIGINT))"), Seq(Row(Map.empty[BigInt, BigInt]))) 
checkAnswer(dfExample1.select(transform_values(col("i"), (k, v) => lit(null).cast("int"))), Seq(Row(Map.empty[Integer, Integer]))) checkAnswer(dfExample1.select(transform_values(col("i"), (k, v) => k)), Seq(Row(Map.empty[Integer, Integer]))) checkAnswer(dfExample1.select(transform_values(col("i"), (k, v) => v)), Seq(Row(Map.empty[Integer, Integer]))) checkAnswer(dfExample1.select(transform_values(col("i"), (k, v) => lit(0))), Seq(Row(Map.empty[Integer, Integer]))) checkAnswer(dfExample1.select(transform_values(col("i"), (k, v) => lit("value"))), Seq(Row(Map.empty[Integer, String]))) checkAnswer(dfExample1.select(transform_values(col("i"), (k, v) => lit(true))), Seq(Row(Map.empty[Integer, Boolean]))) checkAnswer(dfExample1.select(transform_values(col("i"), (k, v) => v.cast("bigint"))), Seq(Row(Map.empty[BigInt, BigInt]))) } testEmpty() dfExample1.cache() dfExample2.cache() testEmpty() } ignore("transform values function - test null values") { val dfExample1 = Seq( Map[Int, Integer](1 -> 1, 2 -> 2, 3 -> 3, 4 -> 4) ).toDF("a") val dfExample2 = Seq( Map[Int, String](1 -> "a", 2 -> "b", 3 -> null) ).toDF("b") def testNullValue(): Unit = { checkAnswer(dfExample1.selectExpr("transform_values(a, (k, v) -> null)"), Seq(Row(Map[Int, Integer](1 -> null, 2 -> null, 3 -> null, 4 -> null)))) checkAnswer(dfExample2.selectExpr( "transform_values(b, (k, v) -> IF(v IS NULL, k + 1, k + 2))"), Seq(Row(Map(1 -> 3, 2 -> 4, 3 -> 4)))) checkAnswer(dfExample1.select(transform_values(col("a"), (k, v) => lit(null).cast("int"))), Seq(Row(Map[Int, Integer](1 -> null, 2 -> null, 3 -> null, 4 -> null)))) checkAnswer(dfExample2.select( transform_values(col("b"), (k, v) => when(v.isNull, k + 1).otherwise(k + 2)) ), Seq(Row(Map(1 -> 3, 2 -> 4, 3 -> 4)))) } testNullValue() dfExample1.cache() dfExample2.cache() testNullValue() } test("transform values function - test invalid functions") { val dfExample1 = Seq( Map[Int, Int](1 -> 1, 9 -> 9, 8 -> 8, 7 -> 7) ).toDF("i") val dfExample2 = Seq( Map[String, String]("a" -> "b") ).toDF("j") val dfExample3 = Seq( Seq(1, 2, 3, 4) ).toDF("x") def testInvalidLambdaFunctions(): Unit = { val ex1 = intercept[AnalysisException] { dfExample1.selectExpr("transform_values(i, k -> k)") } assert(ex1.getMessage.contains("The number of lambda function arguments '1' does not match")) val ex2 = intercept[AnalysisException] { dfExample2.selectExpr("transform_values(j, (k, v, x) -> k + 1)") } assert(ex2.getMessage.contains("The number of lambda function arguments '3' does not match")) val ex3 = intercept[AnalysisException] { dfExample3.selectExpr("transform_values(x, (k, v) -> k + 1)") } assert(ex3.getMessage.contains( "data type mismatch: argument 1 requires map type")) val ex3a = intercept[AnalysisException] { dfExample3.select(transform_values(col("x"), (k, v) => k + 1)) } assert(ex3a.getMessage.contains( "data type mismatch: argument 1 requires map type")) } testInvalidLambdaFunctions() dfExample1.cache() dfExample2.cache() dfExample3.cache() testInvalidLambdaFunctions() } test("arrays zip_with function - for primitive types") { val df1 = Seq[(Seq[Integer], Seq[Integer])]( (Seq(9001, 9002, 9003), Seq(4, 5, 6)), (Seq(1, 2), Seq(3, 4)), (Seq.empty, Seq.empty), (null, null) ).toDF("val1", "val2") val df2 = Seq[(Seq[Integer], Seq[Long])]( (Seq(1, null, 3), Seq(1L, 2L)), (Seq(1, 2, 3), Seq(4L, 11L)) ).toDF("val1", "val2") val expectedValue1 = Seq( Row(Seq(9005, 9007, 9009)), Row(Seq(4, 6)), Row(Seq.empty), Row(null)) checkAnswer(df1.selectExpr("zip_with(val1, val2, (x, y) -> x + y)"), expectedValue1) 
checkAnswer(df1.select(zip_with(df1("val1"), df1("val2"), (x, y) => x + y)), expectedValue1) val expectedValue2 = Seq( Row(Seq(Row(1L, 1), Row(2L, null), Row(null, 3))), Row(Seq(Row(4L, 1), Row(11L, 2), Row(null, 3)))) checkAnswer(df2.selectExpr("zip_with(val1, val2, (x, y) -> (y, x))"), expectedValue2) checkAnswer( df2.select(zip_with(df2("val1"), df2("val2"), (x, y) => struct(y, x))), expectedValue2 ) } test("arrays zip_with function - for non-primitive types") { val df = Seq( (Seq("a"), Seq("x", "y", "z")), (Seq("a", null), Seq("x", "y")), (Seq.empty[String], Seq.empty[String]), (Seq("a", "b", "c"), null) ).toDF("val1", "val2") val expectedValue1 = Seq( Row(Seq(Row("x", "a"), Row("y", null), Row("z", null))), Row(Seq(Row("x", "a"), Row("y", null))), Row(Seq.empty), Row(null)) checkAnswer( df.selectExpr("zip_with(val1, val2, (x, y) -> (y, x))"), expectedValue1 ) checkAnswer( df.select(zip_with(col("val1"), col("val2"), (x, y) => struct(y, x))), expectedValue1 ) } test("arrays zip_with function - invalid") { val df = Seq( (Seq("c", "a", "b"), Seq("x", "y", "z"), 1), (Seq("b", null, "c", null), Seq("x"), 2), (Seq.empty, Seq("x", "z"), 3), (null, Seq("x", "z"), 4) ).toDF("a1", "a2", "i") val ex1 = intercept[AnalysisException] { df.selectExpr("zip_with(a1, a2, x -> x)") } assert(ex1.getMessage.contains("The number of lambda function arguments '1' does not match")) val ex2 = intercept[AnalysisException] { df.selectExpr("zip_with(a1, a2, (acc, x) -> x, (acc, x) -> x)") } assert(ex2.getMessage.contains("Invalid number of arguments for function zip_with")) val ex3 = intercept[AnalysisException] { df.selectExpr("zip_with(i, a2, (acc, x) -> x)") } assert(ex3.getMessage.contains("data type mismatch: argument 1 requires array type")) val ex3a = intercept[AnalysisException] { df.select(zip_with(df("i"), df("a2"), (acc, x) => x)) } assert(ex3a.getMessage.contains("data type mismatch: argument 1 requires array type")) val ex4 = intercept[AnalysisException] { df.selectExpr("zip_with(a1, a, (acc, x) -> x)") } assert(ex4.getMessage.contains("cannot resolve '`a`'")) } private def assertValuesDoNotChangeAfterCoalesceOrUnion(v: Column): Unit = { import DataFrameFunctionsSuite.CodegenFallbackExpr for ((codegenFallback, wholeStage) <- Seq((true, false), (false, false), (false, true))) { val c = if (codegenFallback) { Column(CodegenFallbackExpr(v.expr)) } else { v } withSQLConf( (SQLConf.CODEGEN_FALLBACK.key, codegenFallback.toString), (SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key, wholeStage.toString)) { val df = spark.range(0, 4, 1, 4).withColumn("c", c) val rows = df.collect() val rowsAfterCoalesce = df.coalesce(2).collect() assert(rows === rowsAfterCoalesce, "Values changed after coalesce when " + s"codegenFallback=$codegenFallback and wholeStage=$wholeStage.") val df1 = spark.range(0, 2, 1, 2).withColumn("c", c) val rows1 = df1.collect() val df2 = spark.range(2, 4, 1, 2).withColumn("c", c) val rows2 = df2.collect() val rowsAfterUnion = df1.union(df2).collect() assert(rowsAfterUnion === rows1 ++ rows2, "Values changed after union when " + s"codegenFallback=$codegenFallback and wholeStage=$wholeStage.") } } } test("SPARK-14393: values generated by non-deterministic functions shouldn't change after " + "coalesce or union") { Seq( monotonically_increasing_id(), spark_partition_id(), rand(Random.nextLong()), randn(Random.nextLong()) ).foreach(assertValuesDoNotChangeAfterCoalesceOrUnion(_)) } test("SPARK-21281 fails if functions have no argument") { val df = Seq(1).toDF("a") val funcsMustHaveAtLeastOneArg = 
("coalesce", (df: DataFrame) => df.select(coalesce())) :: ("coalesce", (df: DataFrame) => df.selectExpr("coalesce()")) :: ("hash", (df: DataFrame) => df.select(hash())) :: ("hash", (df: DataFrame) => df.selectExpr("hash()")) :: ("xxhash64", (df: DataFrame) => df.select(xxhash64())) :: ("xxhash64", (df: DataFrame) => df.selectExpr("xxhash64()")) :: Nil funcsMustHaveAtLeastOneArg.foreach { case (name, func) => val errMsg = intercept[AnalysisException] { func(df) }.getMessage assert(errMsg.contains(s"input to function $name requires at least one argument")) } val funcsMustHaveAtLeastTwoArgs = ("greatest", (df: DataFrame) => df.select(greatest())) :: ("greatest", (df: DataFrame) => df.selectExpr("greatest()")) :: ("least", (df: DataFrame) => df.select(least())) :: ("least", (df: DataFrame) => df.selectExpr("least()")) :: Nil funcsMustHaveAtLeastTwoArgs.foreach { case (name, func) => val errMsg = intercept[AnalysisException] { func(df) }.getMessage assert(errMsg.contains(s"input to function $name requires at least two arguments")) } } test("SPARK-24734: Fix containsNull of Concat for array type") { val df = Seq((Seq(1), Seq[Integer](null), Seq("a", "b"))).toDF("k1", "k2", "v") val ex = intercept[Exception] { df.select(map_from_arrays(concat($"k1", $"k2"), $"v")).show() } assert(ex.getMessage.contains("Cannot use null as map key")) } ignore("SPARK-26370: Fix resolution of higher-order function for the same identifier") { val df = Seq( (Seq(1, 9, 8, 7), 1, 2), (Seq(5, 9, 7), 2, 2), (Seq.empty, 3, 2), (null, 4, 2) ).toDF("i", "x", "d") checkAnswer(df.selectExpr("x", "exists(i, x -> x % d == 0)"), Seq( Row(1, true), Row(2, false), Row(3, false), Row(4, null))) checkAnswer(df.filter("exists(i, x -> x % d == 0)"), Seq(Row(Seq(1, 9, 8, 7), 1, 2))) checkAnswer(df.select("x").filter("exists(i, x -> x % d == 0)"), Seq(Row(1))) } test("SPARK-29462: Empty array of NullType for array function with no arguments") { Seq((true, StringType), (false, NullType)).foreach { case (arrayDefaultToString, expectedType) => withSQLConf(SQLConf.LEGACY_CREATE_EMPTY_COLLECTION_USING_STRING_TYPE.key -> arrayDefaultToString.toString) { val schema = spark.range(1).select(array()).schema assert(schema.nonEmpty && schema.head.dataType.isInstanceOf[ArrayType]) val actualType = schema.head.dataType.asInstanceOf[ArrayType].elementType assert(actualType === expectedType) } } } test("SPARK-30790: Empty map with NullType as key/value type for map function with no argument") { Seq((true, StringType), (false, NullType)).foreach { case (mapDefaultToString, expectedType) => withSQLConf(SQLConf.LEGACY_CREATE_EMPTY_COLLECTION_USING_STRING_TYPE.key -> mapDefaultToString.toString) { val schema = spark.range(1).select(map()).schema assert(schema.nonEmpty && schema.head.dataType.isInstanceOf[MapType]) val actualKeyType = schema.head.dataType.asInstanceOf[MapType].keyType val actualValueType = schema.head.dataType.asInstanceOf[MapType].valueType assert(actualKeyType === expectedType) assert(actualValueType === expectedType) } } } test("SPARK-26071: convert map to array and use as map key") { val df = Seq(Map(1 -> "a")).toDF("m") intercept[AnalysisException](df.select(map($"m", lit(1)))) checkAnswer( df.select(map(map_entries($"m"), lit(1))), Row(Map(Seq(Row(1, "a")) -> 1))) } } object DataFrameFunctionsSuite { case class CodegenFallbackExpr(child: Expression) extends Expression with CodegenFallback { override def children: Seq[Expression] = Seq(child) override def nullable: Boolean = child.nullable override def dataType: DataType = 
child.dataType override lazy val resolved = true override def eval(input: InternalRow): Any = child.eval(input) } }
Intel-bigdata/OAP
oap-native-sql/core/src/test/scala/org/apache/spark/sql/DataFrameFunctionsSuite.scala
Scala
apache-2.0
121,259
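// A minimal, self-contained sketch of the arity rules the DataFrameFunctionsSuite above asserts:
// coalesce/hash/xxhash64 need at least one column, greatest/least at least two. The object and
// column names here are illustrative and are not taken from the suite.
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{coalesce, greatest, lit}

object ArityDemo {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[1]").appName("arity-demo").getOrCreate()
    import spark.implicits._
    val df = Seq((1, 5, Option.empty[Int])).toDF("a", "b", "c")
    df.select(greatest($"a", $"b")).show()   // ok: two arguments
    df.select(coalesce($"c", lit(0))).show() // ok: at least one argument
    // df.select(greatest($"a")) would fail analysis with "requires at least two arguments"
    spark.stop()
  }
}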
package edu.berkeley.cs.amplab import spark.RDD import scala.collection.immutable.TreeMap /** * The main analysis body is in StoredSampleAnalysisGeneric. * It should be used for all the live analysis, plotting, and speed testing classes. */ package object carat { val CARAT = "Carat" val UUIDString = "uuId" val resultsTable = "carat.latestresults" val resultKey = UUIDString val similarsTable = "carat.similarusers" val similarKey = UUIDString val hogsTable = "carat.latestapps" val hogKey = "appName" val bugsTable = "carat.latestbugs" val expectedValue = "expectedValue" val expectedValueNeg = expectedValue+"Neg" val modelsTable = "carat.latestmodels" val modelKey = "model" val osTable = "carat.latestos" val osKey = "os" // For getting data: val registrationTable = "carat.registrations" val samplesTable = "carat.samples" val regsUuid = UUIDString val regsModel = "platformId" val regsTimestamp = "timestamp" val regsOs = "osVersion" val sampleKey = regsUuid val sampleTime = regsTimestamp val sampleProcesses = "processList" val sampleBatteryState = "batteryState" val sampleBatteryLevel = "batteryLevel" val sampleEvent = "triggeredBy" def similarityCount(numberOfApps: Double) = math.log(numberOfApps) def flatten(filtered: RDD[(String, TreeMap[Double, Double])]) = { // there are x treemaps. We need to flatten them but include the uuid. filtered.flatMap(x => { var result = new TreeMap[Double, Double] for (k <- x._2) result += ((k._1, k._2)) result }).collect() } def flatten(structured: RDD[(String, Seq[CaratRate])]) = { // there are x treemaps. We need to flatten them but include the uuid. structured.flatMap(x => {x._2}).collect() } }
lagerspetz/TimeSeriesSpark
src/edu/berkeley/cs/amplab/carat/package.scala
Scala
bsd-3-clause
1,789
/* * Copyright 2001-2016 Artima, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.scalatest import SharedHelpers._ import FailureMessages.decorateToStringValue import Matchers._ class FixtureContextSpec extends FunSuite { class MyFixtureContext extends FixtureContext test("Fixture context objects should work in Async styles in Assertion-result tests") { class MyAsyncSpec extends AsyncFlatSpec { "A Fixture Context" should "work in an Async style" in new MyFixtureContext { assert(1 + 1 == 2) } it should "work when it fails" in new MyFixtureContext { assert(1 + 1 == 3) } } val suite = new MyAsyncSpec() val rep = new EventRecordingReporter() suite.run(None, Args(rep)) val testSucceededEvents = rep.testSucceededEventsReceived val testFailedEvents = rep.testFailedEventsReceived assert(testSucceededEvents.length === 1) assert(testSucceededEvents(0).testName.endsWith("work in an Async style")) assert(testFailedEvents.length === 1) assert(testFailedEvents(0).testName.endsWith("work when it fails")) } }
dotty-staging/scalatest
scalatest-test/src/test/scala/org/scalatest/FixtureContextSpec.scala
Scala
apache-2.0
1,635
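// A short sketch of the fixture-context pattern the spec above exercises, assuming a ScalaTest
// version that provides org.scalatest.FixtureContext; the WithNumbers class and its field are
// illustrative, not taken from the spec.
import org.scalatest.{AsyncFlatSpec, FixtureContext}

class NumbersFixtureSpec extends AsyncFlatSpec {
  class WithNumbers extends FixtureContext {
    val numbers = List(1, 2, 3)
  }
  "A fixture-context object" should "expose its fields to the async test body" in new WithNumbers {
    assert(numbers.sum == 6)
  }
}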
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** @author John Miller, Robert Davis * @version 1.2 * @date Sun Sep 16 14:09:25 EDT 2012 * @see LICENSE (MIT style license file). * * This file contains classes for Hessenburg reductions, finding Eigenvalues * and computing Eigenvectors. * Need to add ability to work with `SparseMatrixD` */ package scalation.linalgebra import scala.math.{abs, signum, sqrt} import scala.util.control.Breaks.{breakable, break} import scalation.linalgebra.Householder.house import scalation.linalgebra.MatrixD.{eye, outer} import scalation.math.double_exp import scalation.math.ExtremeD.TOL import scalation.util.Error //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `Eigen` trait defines constants used by classes and objects in the group. */ trait Eigen { /** Debug flag */ protected val DEBUG = true } // Eigen trait //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `Hessenburg` class is used to reduce, via similarity transformations, an * 'n' by 'n' matrix 'a' to Hessenburg form 'h', where all elements two below the * main diagonal are zero (or close to zero). Note, similarity transformations * do not changes the eigenvalues. * @param a the matrix to reduce to Hessenburg form */ class Hessenburg (a: MatrixD) extends Eigen with Error { private val (m, n) = (a.dim1, a.dim2) // size of matrix private var h = new MatrixD (a) // Hessenburg h matrix if (m != n) flaw ("constructor", "must have m == n") for (j <- 0 until n) { // for each column j val x = h.col(j, j) // jth column from jth position val u = x + x.oneAt (0) * x.norm * (if (x(0) < 0.0) -1.0 else 1.0) val pp = eye (n-j) - outer (u, u) * (2.0 / u.normSq) val p = eye (j) diag pp h = p.t * h * p } // for //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Get the Hessenburg h matrix. */ def getH: MatrixD = h } // Hessenburg class //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `Eigenvalue` class is used to find the eigenvalues of an 'n' by 'n' matrix * 'a' using an iterative technique that applies similarity transformations to * convert 'a' into an upper triangular matrix, so that the eigenvalues appear * along the diagonal. To improve performance, the 'a' matrix is first reduced * to Hessenburg form. During the iterative steps, a shifted 'QR' decomposition * is performed. * Caveats: (1) it will not handle eigenvalues that are complex numbers, * (2) it uses a simple shifting strategy that may slow convergence. 
* @param a the matrix whose eigenvalues are sought */ class Eigenvalue (a: MatrixD) extends Eigen with Error { private val ITERATIONS = 12 // max iterations: increase --> more precision, but slower private val (m, n) = (a.dim1, a.dim2) // size of matrix private val e = new VectorD (m) // vector of eigenvalues if (m != n) flaw ("constructor", "must have m == n") var g = (new Hessenburg (a)).getH // convert g matrix to Hessenburg form var converging = true // still converging, has not converged yet var lastE = Double.PositiveInfinity // save an eigenvalue from last iteration for (k <- 0 until ITERATIONS if converging) { // major iterations converging = true for (l <- 0 until ITERATIONS) { // minor iterations val s = g(n - 1, n - 1) // the shift parameter val eye_g = eye (g.dim1) val (qq, rr) = (new Fac_QR_H (g - eye_g * s)).factor12 () g = rr.asInstanceOf [MatrixD] * qq.asInstanceOf [MatrixD] + eye_g * s // FIX } // for for (i <- 0 until n) e(i) = g(i, i) // extract eigenvalues from diagonal val e0 = e(0) // consider one eigenvalue if (abs ((lastE - e0) / e0) < TOL) { // relative error converging = false // end major iterations } else { lastE = e0 // save this eigenvalue } // if if (DEBUG) { println ("-------------------------------------------") println ("Eigenvalue: on iteration " + k + ": g = " + g) println ("Eigenvalue: on iteration " + k + ": e = " + e) if (! converging) println ("Eigenvalue: converged!") } // if } // for //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Reorder the eigenvalue vector 'e' in non-increasing order. */ def reorder () { e.sort2 () } //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Get the eigenvalue 'e' vector. * @param order whether to order the eigenvalues in non-increasing order */ def getE (order: Boolean = true): VectorD = { if (order) reorder() ; e } } // Eigenvalue class //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `HouseholderT` class performs a Householder Tridiagonalization on a * symmetric matrix. * @see Algorithm 8.3.1 in Matrix Computations. * @param a the symmetric matrix to tridiagonalize */ class HouseholderT (a: MatrixD) extends Eigen with Error { /** The Householder tridiagonal matrix */ private val t = new SymTriMatrixD (a.dim1) if (a.dim1 != a.dim2) flaw ("constructor", "must have m == n") if (! a.isSymmetric) flaw ("constructor", "matrix a must be symmetric") val n = a.dim1 - 1 // the last index for (k <- 0 to n - 2) { val ts = a.col(k).slice (k+1, n+1) val v_b = house (ts) val v = v_b._1; val b = v_b._2 val p = a.slice (k+1, n+1, k+1, n+1) * v * b val w = p - v * ((b / 2) * (p dot v)) t(k, k) = a(k, k) t(k+1, k) = ts.norm for (i <- k + 1 to n; j <- k + 1 to n) { a(i, j) = a(i, j) - (v(i - (k+1)) * w(j - (k+1)) + w(i - (k+1)) * v(j - (k+1))) } // for } // for t(n-1, n) = a(n-1, n) t(n-1, n-1) = a(n-1, n-1) t(n, n) = a(n, n) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Get the Householder Tridiagonal matrix 't'. */ def getT: SymTriMatrixD = t } // HouseholderT class //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `SymmetricQRstep` object performs a symmetric 'QR' step with a Wilkinson shift. * @see Algorithm 8.3.2 in Matrix Computations. 
* @see http://people.inf.ethz.ch/arbenz/ewp/Lnotes/chapter3.pdf (Algorithm 3.6) */ object SymmetricQRstep extends Eigen with Error { //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Apply a 'QR' reduction step to matrix 't'. * @param t the unreduced symmetric tridiagonal matrix * @param p the row index * @param q the column index */ def qRStep (t: SymTriMatrixD, p: Int, q: Int) = { val n = t.dg.dim - q - 1 // the last index val d = (t.dg(n-1) - t.dg(n)) / 2.0 // Wilkinson shift val t2 = t.sd(n-1) * t.sd(n-1) val d2 = t.dg(n) - t2 / (d + signum (d) * sqrt (d * d + t2)) var g = t.dg(0) - d2 var s = 1.0 var c = 1.0 var phi = 0.0 for (k <- p until n) { var f = s * (t.sd(k)) var b = c * (t.sd(k)) var r = sqrt (g * g + f * f) c = g / r s = f / r if (k != 0) t.sd(k-1) = r g = t.dg(k) - phi r = (t.dg(k+1) - g) * s + 2.0 * c * b phi = s * r t.dg(k) = g + phi g = c * r - b } // for t.dg(n) = t.dg(n) - phi t.sd(n-1) = g } // qRStep } // SymmetricQRstep object //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `EigenvalueSym` class is used to find the eigenvalues of an 'n' by 'n' * symmetric matrix 'a' using an iterative technique, the Symmetric 'QR' Algorithm. * @see Algorithm 8.3.3 in Matrix Computations. * Caveats: (1) it will not handle eigenvalues that are complex numbers, * (2) it uses a simple shifting strategy that may slow convergence. * @param a the symmetric matrix whose eigenvalues are sought */ class EigenvalueSym (a: MatrixD) extends Eigen with Error { /** The matrix containing a vector of eigenvalues */ private var d: SymTriMatrixD = null val m = a.dim1 // number of rows if (m != a.dim2) flaw ("constructor", "must have m == n") if (! a.isSymmetric) flaw ("constructor", "matrix a must be symmetric") var p = 0 // the row index var q = 0 // the column index d = (new HouseholderT (a)).getT // make symmetric tridiagonal matrix while (q < m) { for (i <- 0 to m-2 if abs (d(i, i+1)) <= TOL) d(i, i+1) = 0.0 // clean d q = 0; p = m-1 while (p > 0 && d(p, p-1) =~ 0.0 && q < m) { q += 1; p -= 1 } while (p > 0 && ! (d(p, p-1) =~ 0.0)) p -= 1 if (q < m) SymmetricQRstep.qRStep (d, p, q) } // while //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Get the eigenvalue 'e' vector. */ def getE: VectorD = d.dg // the diagonal of the tridiagonal matrix } // EigenvalueSym //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `Eigenvector` class is used to find the eigenvectors of an 'n' by 'n' matrix * 'a' by solving equations of the form * <p> * (a - eI)v = 0 * <p> * where 'e' is the eigenvalue and 'v' is the eigenvector. Place the eigenvectors * in a matrix column-wise. 
* @param a the matrix whose eigenvectors are sought * @param _e the vector of eigenvalues of matrix a */ class Eigenvector (a: MatrixD, _e: VectorD = null) extends Eigen with Error { private val ITERATIONS = 12 // max iterations private val m = a.dim1 // number of rows if (a.dim2 != m) flaw ("constructor", "must have m == n") private val v = new MatrixD (m, m) // eigenvectors matrix (each row) private val ident = eye (m) // identity matrix private val e = if (_e == null) (new Eigenvalue (a)).getE () else _e // find eigenvectors using nullspace calculation for (i <- 0 until m) { // compute eigenvector for i-th eigenvalue val a_Ie = (a - ident * e(i)) // a - Ie val c_a_Ie = a_Ie.clean (TOL) if (DEBUG) println (s"a_Ie = $a_Ie \nc_a_Ie = $c_a_Ie") val qr = new Fac_QR_H (c_a_Ie) qr.factor () val eVec = qr.nullspaceV (e.zero (m)) println ("+++ eigenvector for eigenvalue " + e(i) + " = " + eVec) val mat = a_Ie.slice (1, m) if (DEBUG) println ("mat = " + mat) val eVec2 = mat.nullspace println ("--- eigenvector for eigenvalue " + e(i) + " = " + eVec2) // v.setCol (i, eVec) v.setCol (i, eVec2) } // for // find eigenvectors using inverse iteration (also improves eigenvalues) // @see http://home.iitk.ac.in/~dasgupta/MathBook/lmastertrans.pdf (p. 130) // var y_k = new VectorD (m); y_k.set (1./m.toDouble) // old estimate of eigenvector // var y_l: VectorD = null // new estimate of eigenvector // // for (i <- 0 until m) { // compute eigenvector for i-th eigenvalue // breakable { for (k <- 0 until ITERATIONS) { // val a_Ie = a - ident * e(i) // form matrix: [a - Ie] // f (DEBUG) println ("a_Ie = " + a_Ie) // val qr = new Fac_QR_H (a_Ie) // qr.factor () // val y = qr.solve (y_k) // solve [a - Ie]y = y_k // y_l = y / y.norm // normalize // e(i) += 1.0 / (y_k dot y) // improve the eigenvalue // if ((y_l - y_k).norm < TOL) break // y_k = y_l // update the eigenvector // }} // for // println ("eigenvector for eigenvalue " + e(i) + " = " + y_l) // v.setCol (i, y_l) // } // for //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Get the eigenvector 'v' matrix. */ def getV: MatrixD = v } // Eigenvector class //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `EigenTest` object is used to test the all the classes used in computing * Eigenvalues and Eigenvectors for the non-symmetric/general case. * > run-main scalation.linalgebra.EigenTest */ object EigenTest extends App { import scalation.util.Banner.banner //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** For matrix a, find Hessenburg matrix, eigenvalues and eigenvectors. */ def test (a: MatrixD, name: String) { banner (name) val e = (new Eigenvalue (a)).getE () val v = (new Eigenvector (a, e)).getV println ("----------------------------------------------------------") println ("a = " + a) println ("e = " + e) println ("v = " + v) for (i <- 0 until v.dim1) { // check that a * v_i = e_i * v_i println ("a * v_i - v_i * e_i = " + (a * v.col(i) - v.col(i) * e(i))) } // for } // test // @see http://www.mathworks.com/help/symbolic/eigenvalue-trajectories.html // should give e = (3., 2., 1.) val b = new MatrixD ((3, 3), -149.0, -50.0, -154.0, // 3-by-3 matrix 537.0, 180.0, 546.0, -27.0, -9.0, -25.0) test (b, "matrix b") // @see http://www.math.hmc.edu/calculus/tutorials/eigenstuff/eigenstuff.pdf // should give e = (1., -3., -3.) val c = new MatrixD ((3, 3), 5.0, 8.0, 16.0, // 3-by-3 matrix 4.0, 1.0, 8.0, -4.0, -4.0, -11.0) test (c, "matrix c") } // EigenTest object
NBKlepp/fda
scalation_1.2/src/main/scala/scalation/linalgebra/Eigen.scala
Scala
mit
15,006
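// A tiny self-contained cross-check (not part of scalation) for the eigenvalue code above: for a
// 2-by-2 symmetric matrix ((a, b), (b, c)) the characteristic polynomial gives the closed form
// lambda = (a + c)/2 +/- sqrt(((a - c)/2)^2 + b^2), handy for sanity-checking Eigenvalue/EigenvalueSym.
object TwoByTwoEigenCheck extends App {
  import scala.math.sqrt
  val (a, b, c) = (2.0, 1.0, 2.0)                               // the matrix ((2, 1), (1, 2))
  val mean   = (a + c) / 2.0
  val radius = sqrt(((a - c) / 2.0) * ((a - c) / 2.0) + b * b)
  println(s"eigenvalues: ${mean + radius} and ${mean - radius}") // 3.0 and 1.0
}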
package sample.hello

import akka.actor.{Props, ActorLogging, Actor}
import akka.routing.{Broadcast, RoundRobinRoutingLogic, Router, ActorRefRoutee}
import scala.collection.mutable
import scala.collection.mutable.HashMap
import scala.util.control.Breaks._

/**
 * Created by root on 3/3/15.
 */
class FragFeaturesCalc extends Actor with ActorLogging {

  var router = {
    val workerCount = Runtime.getRuntime().availableProcessors()
    val routees = Vector.fill(workerCount * 2) {
      val frag_inc = context.actorOf(Props[FragmentationCalcInception])
      context watch frag_inc
      ActorRefRoutee(frag_inc)
    }
    Router(RoundRobinRoutingLogic(), routees)
  }
  //val frag3=context.actorOf(Props[Fragmentation3])

  def receive = {
    case calculate_wks(plag_file_matches, source_file_matches, normalised_term_frequency, normalised_source_term_freq, term_files_occ, id_total, compared_tuple_w_ids) =>
      var start: Int = -1
      val compared_ids: Tuple2[Int, Int] = (compared_tuple_w_ids._3, compared_tuple_w_ids._4)
      for (key1 <- source_file_matches.iterator) { // for each common word of the source file
        for (position <- key1._2.iterator) { // for each position where the common word was found in the source file
          for (plag_iter <- plag_file_matches.iterator if (plag_iter._1 == key1._1)) {
            find_first_seq(position, plag_iter, plag_file_matches, source_file_matches, compared_ids) // find its first occurrence in the plag_file
          } // and execute the function that will give us the first common sequence
        }
      }
      router.route(Broadcast(Terminate_FR_Calcs(source_file_matches, plag_file_matches, normalised_term_frequency, normalised_source_term_freq, term_files_occ, id_total, compared_tuple_w_ids)), sender())
    case _ => println("Wrong Data Sent To FragFeaturesCalc Actor!")
  }

  def find_first_seq(source_key_pos: Int, plag_iter: (String, mutable.Set[Int]), plag_file_matches: HashMap[String, scala.collection.mutable.Set[Int]] with scala.collection.mutable.MultiMap[String, Int], source_file_matches: HashMap[String, scala.collection.mutable.Set[Int]] with scala.collection.mutable.MultiMap[String, Int], compared_ids: Tuple2[Int, Int]): Unit = {
    var start: Int = 0 // pick any of the occurrences in the text, since we do not care where it appeared
    var end: Int = 0
    var seq_start: String = new String()
    //for(iter <- plag_file_matches.iterator if(iter._1 == plag_iter._1)) {
    seq_start = plag_iter._1
    for (pos_found <- plag_iter._2.iterator) { // the position at which the word common to the source and plagiarised document was found (in the plagiarised document)
      start = pos_found
      end = pos_found
      //println(pos_found +" and "+iter._1)
      router.route(calculate_wks2(start, end, seq_start, plag_file_matches, source_key_pos, source_file_matches, compared_ids), sender()) // send the words to routees so the common strings between the two documents are built faster
    }
  }
}

class FragmentationCalcInception extends Actor with ActorLogging {

  def receive = {
    case calculate_wks2(start, end, seq_start, plag_file_matches, source_key_pos, source_file_matches, compared_ids) =>
      //println(seq_start)
      var start_end_fltr: Array[Int] = new Array[Int](8)
      start_end_fltr(0) = start          // position where the start of the common string was found in the plag_file
      start_end_fltr(1) = end            // position where the end of the common string was found in the plag_file
      start_end_fltr(2) = 1              // termination condition of the while loop (it is 1 when one of the 2 ifs in the for loop of the called function was executed)
      start_end_fltr(3) = 1              // number of words in the common string
      start_end_fltr(4) = source_key_pos // position where the start of the common string was found in the source_file (start)
      start_end_fltr(5) = source_key_pos // position where the end of the common string was found in the source_file (end)
      start_end_fltr(6) = compared_ids._1 // source_id
      start_end_fltr(7) = compared_ids._2 // suspicious file (plag_file) id
      var seq_str: String = seq_start    // the first word found to be common to the 2 compared documents
      while (start_end_fltr(2) == 1) {
        start_end_fltr(2) = 0
        val tup_le = find_whole_seq(start_end_fltr, seq_str, plag_file_matches, source_file_matches)
        seq_str = tup_le._1
        //println("start:"+start_end_fltr(0)+"\\t end:"+ start_end_fltr(1))
        start_end_fltr(2) = tup_le._2(2)
      }
      //println(seq_str+" for word "+seq_start+" in the source file at position "+source_key_pos+" compared with the same word in the plag file at position "+start)
      context.actorSelection("/user/plag_analysis/comparing_s_p/returned_matches/classification/fragmentation_features").!(frag_calculate(seq_str, start_end_fltr))
    case Terminate_FR_Calcs(source_file_matches, plag_file_matches, normalised_term_frequency, normalised_source_term_freq, term_files_occ, id_total, compared_tuple_w_ids) =>
      context.actorSelection("/user/plag_analysis/comparing_s_p/returned_matches/classification/fragmentation_features").!(FR_Calcs_Routees_Terminated(source_file_matches, plag_file_matches, normalised_term_frequency, normalised_source_term_freq, term_files_occ, id_total, compared_tuple_w_ids))
  }

  def find_whole_seq(start_end_fltr: Array[Int], seq_str: String, plag_file_matches: HashMap[String, scala.collection.mutable.Set[Int]] with scala.collection.mutable.MultiMap[String, Int], source_file_matches: HashMap[String, scala.collection.mutable.Set[Int]] with scala.collection.mutable.MultiMap[String, Int]): (String, Array[Int]) = {
    //var seq_str: String = new String()
    var seq_str2: String = new String()
    breakable {
      for (iter <- plag_file_matches.iterator) {
        // println(start+" "+iter._2)
        if (iter._2.contains(start_end_fltr(0) - 1) && source_file_matches.entryExists(iter._1, _ == (start_end_fltr(4) - 1))) {
          start_end_fltr(0) -= 1
          start_end_fltr(2) = 1
          start_end_fltr(3) += 1
          start_end_fltr(4) -= 1
          seq_str2 = iter._1 + " " + seq_str
          break()
        }
        if (iter._2.contains(start_end_fltr(1) + 1) && source_file_matches.entryExists(iter._1, _ == (start_end_fltr(5) + 1))) {
          seq_str2 = seq_str + " " + iter._1
          start_end_fltr(1) += 1
          start_end_fltr(2) = 1
          start_end_fltr(3) += 1
          start_end_fltr(5) += 1
          break()
        }
      }
    }
    if (seq_str2.isEmpty()) {
      seq_str2 = seq_str
    }
    //if(start_end_fltr(0) >=299 && start_end_fltr(1)<=450)
    //println(seq_str2+" and " +start_end_fltr(3))
    return (seq_str2, start_end_fltr)
  }
}
SteliosKats/Plagiarism_Detection_System_Using_Akka
src/main/scala/sample/hello/FragFeaturesCalc.scala
Scala
cc0-1.0
6,645
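// A simplified, self-contained sketch of the idea behind the actors above (not the author's actor
// code): grow a common fragment from a seed word by extending left and right while the adjacent
// positions hold the same word in both documents. All names and the sample sentences are made up.
object FragmentGrowthSketch extends App {
  def grow(src: Vector[String], plag: Vector[String], srcPos: Int, plagPos: Int): Seq[String] = {
    var (s0, p0, s1, p1) = (srcPos, plagPos, srcPos, plagPos)
    while (s0 > 0 && p0 > 0 && src(s0 - 1) == plag(p0 - 1)) { s0 -= 1; p0 -= 1 }                            // extend left
    while (s1 < src.length - 1 && p1 < plag.length - 1 && src(s1 + 1) == plag(p1 + 1)) { s1 += 1; p1 += 1 } // extend right
    src.slice(s0, s1 + 1)
  }
  val src  = Vector("the", "quick", "brown", "fox", "jumps")
  val plag = Vector("a", "quick", "brown", "fox", "runs")
  println(grow(src, plag, srcPos = 2, plagPos = 2).mkString(" ")) // "quick brown fox"
}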
package poly.collection /** * Represents a sequence whose elements are sorted. * @author Tongfei Chen * @since 0.1.0 */ trait SortedSeq[T] extends Seq[T] with SortedIterable[T] { self => override def filter(f: T => Boolean): SortedSeq[T] = super[Seq].filter(f).asIfSorted(self.elementOrder) override def filterNot(f: T => Boolean) = filter(!f) //TODO: thenSortBy } abstract class AbstractSortedSeq[T] extends AbstractSeq[T] with SortedSeq[T]
ctongfei/poly-collection
core/src/main/scala/poly/collection/SortedSeq.scala
Scala
mit
457
package io.iohk.ethereum.blockchain.sync import akka.util.ByteString import io.iohk.ethereum.blockchain.sync.fast.SyncStateScheduler.SyncResponse import io.iohk.ethereum.domain.{Account, Address, Blockchain, BlockchainImpl} import io.iohk.ethereum.ledger.InMemoryWorldStateProxy import io.iohk.ethereum.mpt.MerklePatriciaTrie import io.iohk.ethereum.utils.{BlockchainConfig, ByteUtils} object StateSyncUtils extends EphemBlockchainTestSetup { final case class MptNodeData( accountAddress: Address, accountCode: Option[ByteString], accountStorage: Seq[(BigInt, BigInt)], accountBalance: Int ) class TrieProvider(bl: Blockchain, blockchainConfig: BlockchainConfig) { def getNodes(hashes: List[ByteString]) = { hashes.map { hash => val maybeResult = bl.getMptNodeByHash(hash) match { case Some(value) => Some(ByteString(value.encode)) case None => bl.getEvmCodeByHash(hash) } maybeResult match { case Some(result) => SyncResponse(hash, result) case None => throw new RuntimeException("Missing expected data in storage") } } } def buildWorld(accountData: Seq[MptNodeData], existingTree: Option[ByteString] = None): ByteString = { val init: InMemoryWorldStateProxy = bl .getWorldStateProxy( blockNumber = 1, accountStartNonce = blockchainConfig.accountStartNonce, stateRootHash = existingTree.getOrElse(ByteString(MerklePatriciaTrie.EmptyRootHash)), noEmptyAccounts = true, ethCompatibleStorage = blockchainConfig.ethCompatibleStorage ) .asInstanceOf[InMemoryWorldStateProxy] val modifiedWorld = accountData.foldLeft(init) { case (world, data) => val storage = world.getStorage(data.accountAddress) val modifiedStorage = data.accountStorage.foldLeft(storage) { case (s, v) => s.store(v._1, v._2) } val code = world.getCode(data.accountAddress) val worldWithAccAndStorage = world .saveAccount(data.accountAddress, Account.empty().copy(balance = data.accountBalance)) .saveStorage(data.accountAddress, modifiedStorage) val finalWorld = if (data.accountCode.isDefined) worldWithAccAndStorage.saveCode(data.accountAddress, data.accountCode.get) else worldWithAccAndStorage finalWorld } val persisted = InMemoryWorldStateProxy.persistState(modifiedWorld) persisted.stateRootHash } } object TrieProvider { def apply(): TrieProvider = { val freshStorage = getNewStorages new TrieProvider(BlockchainImpl(freshStorage.storages), blockchainConfig) } } def createNodeDataStartingFrom(initialNumber: Int, lastNumber: Int, storageOffset: Int): Seq[MptNodeData] = { (initialNumber until lastNumber).map { i => val address = Address(i) val codeBytes = ByteString(BigInt(i).toByteArray) val storage = (initialNumber until initialNumber + storageOffset).map(s => (BigInt(s), BigInt(s))) val balance = i MptNodeData(address, Some(codeBytes), storage, balance) } } def checkAllDataExists(nodeData: List[MptNodeData], bl: Blockchain, blNumber: BigInt): Boolean = { def go(remaining: List[MptNodeData]): Boolean = { if (remaining.isEmpty) { true } else { val dataToCheck = remaining.head val address = bl.getAccount(dataToCheck.accountAddress, blNumber) val code = address.flatMap(a => bl.getEvmCodeByHash(a.codeHash)) val storageCorrect = dataToCheck.accountStorage.forall { case (key, value) => val stored = bl.getAccountStorageAt(address.get.storageRoot, key, ethCompatibleStorage = true) ByteUtils.toBigInt(stored) == value } if (address.isDefined && code.isDefined && storageCorrect) { go(remaining.tail) } else { false } } } go(nodeData) } }
input-output-hk/etc-client
src/test/scala/io/iohk/ethereum/blockchain/sync/StateSyncUtils.scala
Scala
mit
4,014
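// A hedged usage sketch (not present in the repository) relying only on the helpers defined above:
// build a few accounts, persist them into a fresh trie, then fetch the root node the way a
// fast-sync test would. The demo object name and parameter values are illustrative.
import io.iohk.ethereum.blockchain.sync.StateSyncUtils.{TrieProvider, createNodeDataStartingFrom}

object StateSyncUtilsDemo extends App {
  val provider  = TrieProvider()
  val accounts  = createNodeDataStartingFrom(initialNumber = 1, lastNumber = 5, storageOffset = 2)
  val stateRoot = provider.buildWorld(accounts)
  val responses = provider.getNodes(List(stateRoot))
  println(s"fetched ${responses.size} node(s) for state root $stateRoot")
}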
package monocle.function import monocle.MonocleSuite import scalaz.std.string._ import scalaz.{IMap, OneAnd} class IndexExample extends MonocleSuite { test("index creates an Optional from a Map, IMap to a value at the index") { (Map("One" -> 1, "Two" -> 2) applyOptional index("One") getOption) shouldEqual Some(1) (IMap("One" -> 1, "Two" -> 2) applyOptional index("One") getOption) shouldEqual Some(1) (Map("One" -> 1, "Two" -> 2) applyOptional index("One") set 2) shouldEqual Map("One" -> 2, "Two" -> 2) (IMap("One" -> 1, "Two" -> 2) applyOptional index("One") set 2) shouldEqual IMap("One" -> 2, "Two" -> 2) } test("index creates an Optional from a List, IList, Vector or Stream to a value at the index") { (List(0,1,2,3) applyOptional index(1) getOption) shouldEqual Some(1) (List(0,1,2,3) applyOptional index(8) getOption) shouldEqual None (Vector(0,1,2,3) applyOptional index(1) modify(_ + 1)) shouldEqual Vector(0,2,2,3) // setting or modifying a value at an index without value is a no op (Stream(0,1,2,3) applyOptional index(64) set 10) shouldEqual Stream(0,1,2,3) } test("index creates an Optional from a OneAnd to a value at the index") { (OneAnd(1, List(2,3)) applyOptional index(0) getOption) shouldEqual Some(1) (OneAnd(1, List(2,3)) applyOptional index(1) getOption) shouldEqual Some(2) } test("index creates an Optional from a String to a Char") { ("Hello World" applyOptional index(2) getOption) shouldEqual Some('l') } test("index creates an Optional from Int to one of its bit") { (3 applyOptional index(0) getOption) shouldEqual Some(true) // true means bit is 1 (4 applyOptional index(0) getOption) shouldEqual Some(false) // false means bit is 0 (0 applyOptional index(79) getOption) shouldEqual None (32 applyOptional index(0) set true) shouldEqual 33 (3 applyOptional index(1) modify(!_)) shouldEqual 1 // since we toggled 2nd bit // update on an incorrect index is a noop (0 applyOptional index(-1) set true) shouldEqual 0 } test("index creates an Optional from Char to one of its bit") { ('x' applyOptional index(0) getOption) shouldEqual Some(false) ('x' applyOptional index(0) set true) shouldEqual 'y' } }
NightRa/Monocle
example/src/test/scala/monocle/function/IndexExample.scala
Scala
mit
2,273
package models case class Whereabouts(location: String = "", other: Option[String] = None) case class PaymentFrequency(frequency: String = "", other: Option[String] = None) { def stringify = frequency + " " + other.getOrElse("") } case class PensionPaymentFrequency(frequency: String, other: Option[String] = None) { def stringify = frequency + " " + other.getOrElse("") } case class ReasonForBeingThere(reason: Option[String] = None, other: Option[String] = None)
Department-for-Work-and-Pensions/ClaimCapture
c3/app/models/WithOther.scala
Scala
mit
475
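// A tiny usage note for the models above (the values are made up): stringify simply joins the
// frequency with the optional free-text detail, falling back to the frequency plus a trailing
// space when `other` is None.
object FrequencyStringifyDemo extends App {
  import models.{PaymentFrequency, PensionPaymentFrequency}
  println(PaymentFrequency("Weekly", Some("every Friday")).stringify) // "Weekly every Friday"
  println(PensionPaymentFrequency("Monthly").stringify)               // "Monthly " (note the trailing space)
}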
/* sbt -- Simple Build Tool * Copyright 2010 Mark Harrah */ package sbt import Predef.{ Map, Set, implicitly } // excludes *both 2.10.x conforms and 2.11.x $conforms in source compatible manner. import sbt.internal.util.{ Cache, HList, HNil, InputCache, LinePosition, LineRange, NoPosition, RangePosition, SourcePosition } import sbt.internal.util.FileInfo.{ exists, hash } import sbt.internal.util.Types.{ :+:, idFun } import java.io.File import java.{ util => ju } import java.net.URL import scala.xml.NodeSeq import sbinary.{ DefaultProtocol, Format } // import sbt.internal.librarymanagement.{ ExternalIvyConfiguration, IvyConfiguration, IvyPaths, IvyScala, ModuleSettings, RetrieveConfiguration, SbtExclusionRule, UpdateConfiguration, UpdateReport } // import sbt.librarymanagement.{ Configuration, ExclusionRule, CrossVersion, ModuleID, Patterns } import sbt.internal.librarymanagement._ import sbt.librarymanagement._ import sbt.librarymanagement.RepositoryHelpers._ import Ordering._ import sbt.io.Hash /** * InputCaches for IvyConfiguration, ModuleSettings, and UpdateConfiguration * The InputCaches for a basic data structure is built in two parts. * Given the data structure: * Data[A,B,C, ...] * 1) Define a conversion from Data to the HList A :+: B :+: C :+: ... :+: HNil, * excluding any members that should not be considered for caching * 2) In theory, 1) would be enough and wrapHL would generate InputCache[Data] as long * as all of InputCache[A], InputCache[B], ... exist. However, if any of these child * InputCaches are constructed using wrapHL, you get a diverging implicit error. (I * believe scalac is generating this error as specified, but that the implicits would * be valid and not be infinite. This might take some effort to come up with a new rule * that allows this) * 3) So, we need to explicitly define the intermediate implicits. The general approach is: * {{{ * object LN { * ... Data => HList conversions ... * } * import LN._ * implicit dataCache: InputCache[Data] = wrapHL * * object L(N-1) ... * }}} * Each Data in LN only uses implicits from L(N-1). * This way, higher levels (higher N) cannot see the HList conversions of subcomponents but can * use the explicitly defined subcomponent implicits and there is no divergence. * 4) Ideally, diverging implicits could be relaxed so that the ... = wrapIn lines could be removed. 
*/ object CacheIvy { def password(s: Option[String]) = new Array[Byte](0) def names(s: Iterable[Configuration]): Set[String] = s.map(_.name).toSet import Cache._ implicit def wrapHL[W, H, T <: HList](implicit f: W => H :+: T, cache: InputCache[H :+: T]): InputCache[W] = Cache.wrapIn(f, cache) lazy val excludeMap: Format[Map[ModuleID, Set[String]]] = implicitly lazy val updateIC: InputCache[IvyConfiguration :+: ModuleSettings :+: UpdateConfiguration :+: HNil] = implicitly /* def deliverIC: InputCache[IvyConfiguration :+: ModuleSettings :+: DeliverConfiguration :+: HNil] = implicitly def publishIC: InputCache[IvyConfiguration :+: ModuleSettings :+: PublishConfiguration :+: HNil] = implicitly*/ lazy val moduleIDSeqIC: InputCache[Seq[ModuleID]] = implicitly lazy val modulePositionMapFormat: Format[Map[ModuleID, SourcePosition]] = implicitly implicit lazy val updateReportFormat: Format[UpdateReport] = { import DefaultProtocol.{ StringFormat, FileFormat } wrap[UpdateReport, (File, Seq[ConfigurationReport], UpdateStats, Map[File, Long])](rep => (rep.cachedDescriptor, rep.configurations, rep.stats, rep.stamps), { case (cd, cs, stats, stamps) => new UpdateReport(cd, cs, stats, stamps) }) } implicit def updateStatsFormat: Format[UpdateStats] = wrap[UpdateStats, (Long, Long, Long)](us => (us.resolveTime, us.downloadTime, us.downloadSize), { case (rt, dt, ds) => new UpdateStats(rt, dt, ds, true) }) implicit def confReportFormat(implicit m: Format[String], mr: Format[Seq[ModuleReport]], oar: Format[Seq[OrganizationArtifactReport]]): Format[ConfigurationReport] = wrap[ConfigurationReport, (String, Seq[ModuleReport], Seq[OrganizationArtifactReport])](r => (r.configuration, r.modules, r.details), { case (c, m, d) => new ConfigurationReport(c, m, d) }) implicit def moduleReportFormat(implicit cf: Format[Seq[Caller]], ff: Format[File]): Format[ModuleReport] = { wrap[ModuleReport, (ModuleID, Seq[(Artifact, File)], Seq[Artifact], Option[String], Option[Long], Option[String], Option[String], Boolean, Option[String], Option[String], Option[String], Option[String], Map[String, String], Option[Boolean], Option[String], Seq[String], Seq[(String, Option[String])], Seq[Caller])]( m => (m.module, m.artifacts, m.missingArtifacts, m.status, m.publicationDate map { _.getTime }, m.resolver, m.artifactResolver, m.evicted, m.evictedData, m.evictedReason, m.problem, m.homepage, m.extraAttributes, m.isDefault, m.branch, m.configurations, m.licenses, m.callers), { case (m, as, ms, s, pd, r, a, e, ed, er, p, h, ea, d, b, cs, ls, ks) => new ModuleReport(m, as, ms, s, pd map { new ju.Date(_) }, r, a, e, ed, er, p, h, ea, d, b, cs, ls, ks) }) } implicit def artifactFormat(implicit sf: Format[String], uf: Format[Option[URL]]): Format[Artifact] = { wrap[Artifact, (String, String, String, Option[String], Seq[Configuration], Option[URL], Map[String, String])]( a => (a.name, a.`type`, a.extension, a.classifier, a.configurations.toSeq, a.url, a.extraAttributes), { case (n, t, x, c, cs, u, e) => Artifact(n, t, x, c, cs, u, e) } ) } implicit def organizationArtifactReportFormat(implicit sf: Format[String], bf: Format[Boolean], df: Format[Seq[ModuleReport]]): Format[OrganizationArtifactReport] = wrap[OrganizationArtifactReport, (String, String, Seq[ModuleReport])](m => (m.organization, m.name, m.modules), { case (o, n, r) => OrganizationArtifactReport(o, n, r) }) implicit def callerFormat: Format[Caller] = wrap[Caller, (ModuleID, Seq[String], Map[String, String], Boolean, Boolean, Boolean, Boolean)](c => (c.caller, c.callerConfigurations, 
c.callerExtraAttributes, c.isForceDependency, c.isChangingDependency, c.isTransitiveDependency, c.isDirectlyForceDependency), { case (c, cc, ea, fd, cd, td, df) => new Caller(c, cc, ea, fd, cd, td, df) }) implicit def exclusionRuleFormat(implicit sf: Format[String]): Format[InclExclRule] = wrap[InclExclRule, (String, String, String, Seq[String])](e => (e.organization, e.name, e.artifact, e.configurations), { case (o, n, a, cs) => InclExclRule(o, n, a, cs) }) implicit def crossVersionFormat: Format[CrossVersion] = wrap(crossToInt, crossFromInt) implicit def sourcePositionFormat: Format[SourcePosition] = wrap[SourcePosition, (Int, String, Int, Int)]( { case NoPosition => (0, "", 0, 0) case LinePosition(p, s) => (1, p, s, 0) case RangePosition(p, LineRange(s, e)) => (2, p, s, e) }, { case (0, _, _, _) => NoPosition case (1, p, s, _) => LinePosition(p, s) case (2, p, s, e) => RangePosition(p, LineRange(s, e)) } ) private[this] final val DisabledValue = 0 private[this] final val BinaryValue = 1 private[this] final val FullValue = 2 import CrossVersion.{ Binary, Disabled, Full } private[this] val crossFromInt = (i: Int) => i match { case BinaryValue => new Binary(idFun); case FullValue => new Full(idFun); case _ => Disabled } private[this] val crossToInt = (c: CrossVersion) => c match { case Disabled => 0; case b: Binary => BinaryValue; case f: Full => FullValue } implicit def moduleIDFormat(implicit sf: Format[String], bf: Format[Boolean]): Format[ModuleID] = wrap[ModuleID, ((String, String, String, Option[String], Option[String]), (Boolean, Boolean, Boolean, Seq[Artifact], Seq[InclusionRule], Seq[ExclusionRule], Map[String, String], CrossVersion))]( m => ((m.organization, m.name, m.revision, m.configurations, m.branchName), (m.isChanging, m.isTransitive, m.isForce, m.explicitArtifacts, m.inclusions, m.exclusions, m.extraAttributes, m.crossVersion)), { case ((o, n, r, cs, br), (ch, t, f, as, incl, excl, x, cv)) => ModuleID(o, n, r, cs, ch, t, f, as, incl, excl, x, cv, br) } ) // For some reason sbinary seems to detect unserialized instance Set[ModuleID] to be not equal. 
#1620 implicit def moduleSetIC: InputCache[Set[ModuleID]] = { implicit def toSeq(ms: Set[ModuleID]): Seq[ModuleID] = ms.toSeq.sortBy { _.toString } wrapIn } implicit def configurationFormat(implicit sf: Format[String]): Format[Configuration] = wrap[Configuration, String](_.name, s => new Configuration(s)) implicit def classpathFormat = { import DefaultProtocol.FileFormat implicitly[Format[Map[String, Seq[File]]]] } object L5 { implicit def inlineIvyToHL = (i: InlineIvyConfiguration) => i.paths :+: i.resolvers :+: i.otherResolvers :+: i.moduleConfigurations :+: i.localOnly :+: i.checksums :+: HNil } import L5._ implicit def inlineIvyIC: InputCache[InlineIvyConfiguration] = wrapIn implicit def moduleSettingsIC: InputCache[ModuleSettings] = unionInputCache[ModuleSettings, PomConfiguration :+: InlineConfiguration :+: InlineConfigurationWithExcludes :+: IvyFileConfiguration :+: HNil] implicit def ivyConfigurationIC: InputCache[IvyConfiguration] = unionInputCache[IvyConfiguration, InlineIvyConfiguration :+: ExternalIvyConfiguration :+: HNil] object L4 { implicit val inlineWithExcludesToHL = (c: InlineConfigurationWithExcludes) => c.module :+: c.dependencies :+: c.ivyXML :+: c.configurations :+: c.defaultConfiguration.map(_.name) :+: c.ivyScala :+: c.validate :+: c.overrides :+: c.excludes :+: HNil implicit def moduleConfToHL = (m: ModuleConfiguration) => m.organization :+: m.name :+: m.revision :+: m.resolver :+: HNil implicit def inlineToHL = (c: InlineConfiguration) => c.module :+: c.dependencies :+: c.ivyXML :+: c.configurations :+: c.defaultConfiguration.map(_.name) :+: c.ivyScala :+: c.validate :+: c.overrides :+: HNil } import L4._ implicit def inlineWithExcludesIC: InputCache[InlineConfigurationWithExcludes] = wrapIn implicit def inlineIC: InputCache[InlineConfiguration] = wrapIn implicit def moduleConfIC: InputCache[ModuleConfiguration] = wrapIn object L3 { implicit def mavenCacheToHL = (m: MavenCache) => m.name :+: m.rootFile.getAbsolutePath :+: HNil implicit def mavenRToHL = (m: MavenRepository) => m.name :+: m.root :+: HNil implicit def fileRToHL = (r: FileRepository) => r.name :+: r.configuration :+: r.patterns :+: HNil implicit def urlRToHL = (u: URLRepository) => u.name :+: u.patterns :+: HNil implicit def sshRToHL = (s: SshRepository) => s.name :+: s.connection :+: s.patterns :+: s.publishPermissions :+: HNil implicit def sftpRToHL = (s: SftpRepository) => s.name :+: s.connection :+: s.patterns :+: HNil implicit def rawRToHL = (r: RawRepository) => r.name :+: r.resolver.getClass.getName :+: HNil implicit def chainRToHL = (c: ChainedResolver) => c.name :+: c.resolvers :+: HNil implicit def moduleToHL = (m: ModuleID) => m.organization :+: m.name :+: m.revision :+: m.configurations :+: m.isChanging :+: m.isTransitive :+: m.explicitArtifacts :+: m.exclusions :+: m.inclusions :+: m.extraAttributes :+: m.crossVersion :+: HNil } import L3._ implicit lazy val chainedIC: InputCache[ChainedResolver] = InputCache.lzy(wrapIn) implicit lazy val resolverIC: InputCache[Resolver] = unionInputCache[Resolver, ChainedResolver :+: MavenRepository :+: MavenCache :+: FileRepository :+: URLRepository :+: SshRepository :+: SftpRepository :+: RawRepository :+: HNil] implicit def moduleIC: InputCache[ModuleID] = wrapIn implicitly[InputCache[Seq[Configuration]]] object L2 { implicit def updateConfToHL = (u: UpdateConfiguration) => u.retrieve :+: u.missingOk :+: HNil implicit def pomConfigurationHL = (c: PomConfiguration) => hash(c.file) :+: c.ivyScala :+: c.validate :+: HNil implicit def 
ivyFileConfigurationHL = (c: IvyFileConfiguration) => hash(c.file) :+: c.ivyScala :+: c.validate :+: HNil implicit def sshConnectionToHL = (s: SshConnection) => s.authentication :+: s.hostname :+: s.port :+: HNil implicit def artifactToHL = (a: Artifact) => a.name :+: a.`type` :+: a.extension :+: a.classifier :+: names(a.configurations) :+: a.url :+: a.extraAttributes :+: HNil implicit def inclExclToHL = (e: InclExclRule) => e.organization :+: e.name :+: e.artifact :+: e.configurations :+: HNil implicit def sbtExclusionToHL = (e: SbtExclusionRule) => e.organization :+: e.name :+: e.artifact :+: e.configurations :+: e.crossVersion :+: HNil implicit def crossToHL = (c: CrossVersion) => crossToInt(c) :+: HNil /* implicit def deliverConfToHL = (p: DeliverConfiguration) => p.deliverIvyPattern :+: p.status :+: p.configurations :+: HNil implicit def publishConfToHL = (p: PublishConfiguration) => p.ivyFile :+: p.resolverName :+: p.artifacts :+: HNil*/ } import L2._ implicit def updateConfIC: InputCache[UpdateConfiguration] = wrapIn implicit def pomIC: InputCache[PomConfiguration] = wrapIn implicit def ivyFileIC: InputCache[IvyFileConfiguration] = wrapIn implicit def connectionIC: InputCache[SshConnection] = wrapIn implicit def artifactIC: InputCache[Artifact] = wrapIn implicit def exclusionIC: InputCache[InclExclRule] = wrapIn implicit def sbtExclusionIC: InputCache[SbtExclusionRule] = wrapIn implicit def crossVersionIC: InputCache[CrossVersion] = wrapIn /* implicit def publishConfIC: InputCache[PublishConfiguration] = wrapIn implicit def deliverConfIC: InputCache[DeliverConfiguration] = wrapIn*/ object L1 { implicit def retrieveToHL = (r: RetrieveConfiguration) => exists(r.retrieveDirectory) :+: r.outputPattern :+: HNil implicit def ivyPathsToHL = (p: IvyPaths) => exists(p.baseDirectory) :+: p.ivyHome.map(exists.apply) :+: HNil implicit def ivyScalaHL = (i: IvyScala) => i.scalaFullVersion :+: i.scalaBinaryVersion :+: names(i.configurations) :+: i.checkExplicit :+: i.filterImplicit :+: HNil implicit def configurationToHL = (c: Configuration) => c.name :+: c.description :+: c.isPublic :+: names(c.extendsConfigs) :+: c.transitive :+: HNil implicit def passwordToHL = (s: PasswordAuthentication) => Hash(s.user) :+: password(s.password) :+: HNil implicit def keyFileToHL = (s: KeyFileAuthentication) => Hash(s.user) :+: hash(s.keyfile) :+: password(s.password) :+: HNil implicit def patternsToHL = (p: Patterns) => p.ivyPatterns :+: p.artifactPatterns :+: p.isMavenCompatible :+: HNil implicit def fileConfToHL = (f: FileConfiguration) => f.isLocal :+: f.isTransactional :+: HNil implicit def externalIvyConfigurationToHL = (e: ExternalIvyConfiguration) => exists(e.baseDirectory) :+: Hash.contentsIfLocal(e.uri) :+: HNil } import L1._ implicit def ivyScalaIC: InputCache[IvyScala] = wrapIn implicit def ivyPathsIC: InputCache[IvyPaths] = wrapIn implicit def retrieveIC: InputCache[RetrieveConfiguration] = wrapIn implicit def patternsIC: InputCache[Patterns] = wrapIn implicit def fileConfIC: InputCache[FileConfiguration] = wrapIn implicit def extIvyIC: InputCache[ExternalIvyConfiguration] = wrapIn implicit def confIC: InputCache[Configuration] = wrapIn implicit def authIC: InputCache[SshAuthentication] = unionInputCache[SshAuthentication, PasswordAuthentication :+: KeyFileAuthentication :+: HNil] }
dansanduleac/sbt
main/actions/src/main/scala/sbt/CacheIvy.scala
Scala
bsd-3-clause
15,600
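// A simplified, self-contained analogy for the layered-implicits scheme described in CacheIvy's
// comment above. This is not sbt's Cache/InputCache API: Keyed, Module and Conf are invented here
// purely to show the pattern of mapping a case class onto a representation the compiler already
// knows how to handle, while pinning down each intermediate instance explicitly so implicit
// search never has to diverge.
trait Keyed[A] { def key(a: A): String }

object Keyed {
  def instance[A](f: A => String): Keyed[A] = new Keyed[A] { def key(a: A): String = f(a) }
  implicit val stringKeyed: Keyed[String] = instance(identity)
  implicit val intKeyed: Keyed[Int]       = instance(_.toString)
  implicit def pairKeyed[A, B](implicit ka: Keyed[A], kb: Keyed[B]): Keyed[(A, B)] =
    instance { case (a, b) => ka.key(a) + ":" + kb.key(b) }
  // analogous to wrapHL/wrapIn: key a structured type through a representation we can already key
  def wrap[W, R](to: W => R)(implicit kr: Keyed[R]): Keyed[W] = instance(w => kr.key(to(w)))
}

final case class Module(org: String, name: String)
final case class Conf(module: Module, retries: Int)

object KeyedLayers {
  // lower layer: an explicit instance for the subcomponent ...
  implicit val moduleKeyed: Keyed[Module] = Keyed.wrap((m: Module) => (m.org, m.name))
  // ... upper layer: the composite reuses it instead of re-deriving Module's shape
  implicit val confKeyed: Keyed[Conf] = Keyed.wrap((c: Conf) => (c.module, c.retries))
}

object KeyedDemo extends App {
  import KeyedLayers._
  println(implicitly[Keyed[Conf]].key(Conf(Module("org.example", "demo"), 3))) // org.example:demo:3
}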
package mesosphere.marathon import mesosphere.chaos.http.HttpConf import org.apache.mesos.Protos.{ FrameworkID, FrameworkInfo, Credential } import org.apache.mesos.{ SchedulerDriver, MesosSchedulerDriver } import com.google.protobuf.ByteString import java.io.{ FileInputStream, IOException } /** * Wrapper class for the scheduler */ object MarathonSchedulerDriver { var driver: Option[SchedulerDriver] = None var scheduler: Option[MarathonScheduler] = None def newDriver(config: MarathonConf, httpConfig: HttpConf, newScheduler: MarathonScheduler, frameworkId: Option[FrameworkID]): SchedulerDriver = { val frameworkInfoBuilder = FrameworkInfo.newBuilder() .setName(config.frameworkName()) .setFailoverTimeout(config.mesosFailoverTimeout().toDouble) .setUser(config.mesosUser()) .setCheckpoint(config.checkpoint()) // Set the role, if provided. config.mesosRole.get.foreach(frameworkInfoBuilder.setRole) // Set the ID, if provided frameworkId.foreach(frameworkInfoBuilder.setId) if (config.webuiUrl.isSupplied) { frameworkInfoBuilder.setWebuiUrl(config.webuiUrl()) } else if (httpConfig.sslKeystorePath.isDefined) { // ssl enabled, use https frameworkInfoBuilder.setWebuiUrl(s"https://${config.hostname()}:${httpConfig.httpsPort()}") } else { // ssl disabled, use http frameworkInfoBuilder.setWebuiUrl(s"http://${config.hostname()}:${httpConfig.httpPort()}") } // set the authentication principal, if provided config.mesosAuthenticationPrincipal.get.foreach(frameworkInfoBuilder.setPrincipal) val credential: Option[Credential] = config.mesosAuthenticationPrincipal.get.map { principal => val credentialBuilder = Credential.newBuilder() .setPrincipal(principal) config.mesosAuthenticationSecretFile.get.foreach { secretFile => try { val secretBytes = ByteString.readFrom(new FileInputStream(secretFile)) credentialBuilder.setSecret(secretBytes) } catch { case cause: Throwable => throw new IOException(s"Error reading authentication secret from file [$secretFile]", cause) } } credentialBuilder.build() } val frameworkInfo = frameworkInfoBuilder.build() val newDriver: MesosSchedulerDriver = credential match { case Some(cred) => new MesosSchedulerDriver(newScheduler, frameworkInfo, config.mesosMaster(), cred) case None => new MesosSchedulerDriver(newScheduler, frameworkInfo, config.mesosMaster()) } driver = Some(newDriver) scheduler = Some(newScheduler) newDriver } }
14Zen/marathon
src/main/scala/mesosphere/marathon/MarathonSchedulerDriver.scala
Scala
apache-2.0
2,756
/* * Copyright 2015-2016 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package models import scala.slick.driver.PostgresDriver.simple._ import play.api.Play.current import play.api.data.Forms._ case class Availability(name: String, date: String, parkid: Int, id: Option[Int] = None) class Availabilities(tag: Tag) extends Table[Availability](tag, "AVAILABILITIES") { // Auto Increment the id primary key column def id = column[Int]("ID", O.PrimaryKey, O.AutoInc) // The name can't be null def name = column[String]("NAME", O.NotNull) def date = column[String]("DATE", O.NotNull) def parkid = column[Int]("PARKID", O.NotNull) // the * projection (e.g. select * ...) auto-transforms the tupled // column values to / from a User def * = (name, date, parkid, id.?) <> (Availability.tupled, Availability.unapply) } object Availabilities { val db = play.api.db.slick.DB val availability = TableQuery[Availabilities] def all: List[Availability] = db.withSession { implicit session => availability.sortBy(_.date.asc.nullsFirst).list } def create(newavailability: Availability) = db.withTransaction{ implicit session => availability += newavailability } def find(parkid: Int): Availability = db.withSession{ implicit session => availability.filter(_.parkid === parkid).first } def find(date: String): Option[Availability] = db.withSession{ implicit session => availability.filter(_.date === date).firstOption } def find(date: String, parkid: Int): Option[Availability] = db.withSession { implicit session => availability.filter(a => a.date === date && a.parkid === parkid).firstOption } def update(updateAvailability: Availability) = db.withTransaction{ implicit session => availability.filter(_.id === updateAvailability.id).update(updateAvailability) } def delete(id: Int) = db.withTransaction{ implicit session => availability.filter(_.id === id).delete } def delete(date: String, parkid: Int) = db.withSession { implicit session => availability.filter(a => a.date === date && a.parkid === parkid).delete } }
iandow/bookfast
app/models/Availabilities.scala
Scala
mit
2,605
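// A hedged usage sketch relying only on the Slick model defined above; it assumes a running Play
// application (for play.api.db.slick.DB) and an existing park row with id 1. The date string and
// object name are illustrative.
object AvailabilityDemo {
  import models.{Availabilities, Availability}
  def run(): Unit = {
    Availabilities.create(Availability(name = "Morning slot", date = "2016-07-04", parkid = 1))
    Availabilities.find("2016-07-04", 1) match {
      case Some(a) => println(s"found availability ${a.id} for park ${a.parkid}")
      case None    => println("nothing available on that date")
    }
    Availabilities.delete("2016-07-04", 1)
  }
}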
package funsets import org.scalatest.FunSuite import org.junit.runner.RunWith import org.scalatest.junit.JUnitRunner /** * This class is a test suite for the methods in object FunSets. To run * the test suite, you can either: * - run the "test" command in the SBT console * - right-click the file in eclipse and chose "Run As" - "JUnit Test" */ @RunWith(classOf[JUnitRunner]) class FunSetSuite extends FunSuite { /** * Link to the scaladoc - very clear and detailed tutorial of FunSuite * * http://doc.scalatest.org/1.9.1/index.html#org.scalatest.FunSuite * * Operators * - test * - ignore * - pending */ /** * Tests are written using the "test" operator and the "assert" method. */ test("string take") { val message = "hello, world" assert(message.take(5) == "hello") } /** * For ScalaTest tests, there exists a special equality operator "===" that * can be used inside "assert". If the assertion fails, the two values will * be printed in the error message. Otherwise, when using "==", the test * error message will only say "assertion failed", without showing the values. * * Try it out! Change the values so that the assertion fails, and look at the * error message. */ test("adding ints") { assert(1 + 2 === 3) } import FunSets._ test("contains is implemented") { assert(contains(x => true, 100)) } /** * When writing tests, one would often like to re-use certain values for multiple * tests. For instance, we would like to create an Int-set and have multiple test * about it. * * Instead of copy-pasting the code for creating the set into every test, we can * store it in the test class using a val: * * val s1 = singletonSet(1) * * However, what happens if the method "singletonSet" has a bug and crashes? Then * the test methods are not even executed, because creating an instance of the * test class fails! * * Therefore, we put the shared values into a separate trait (traits are like * abstract classes), and create an instance inside each test method. * */ trait TestSets { val s1 = singletonSet(1) val s2 = singletonSet(2) val s3 = singletonSet(3) val u12 = union(s1, s2) val u23 = union(s2, s3) val u123 = union(u12, u23) def greaterThan2(x: Int) = x > 2 } /** * This test is currently disabled (by using "ignore") because the method * "singletonSet" is not yet implemented and the test would fail. * * Once you finish your implementation of "singletonSet", exchange the * function "ignore" by "test". */ test("singletonSet(1) contains 1") { /** * We create a new instance of the "TestSets" trait, this gives us access * to the values "s1" to "s3". */ new TestSets { /** * The string argument of "assert" is a message that is printed in case * the test fails. This helps identifying which assertion failed. 
*/ assert(contains(s1, 1), "Singleton") } } test("union contains all elements") { new TestSets { val s = union(s1, s2) assert(contains(s, 1), "Union 1") assert(contains(s, 2), "Union 2") assert(!contains(s, 3), "Union 3") assert(contains(u123, 1), "Union 123 contains 1") assert(contains(u123, 2), "Union 123 contains 2") assert(contains(u123, 3), "Union 123 contains 3") assert(!contains(u123, 4), "Union 123 does not contains 4") } } test("diff") { new TestSets { assert(contains(diff(u12, singletonSet(1)), 2), "union with singleton") assert(contains(diff(s1, s2), 1), "diff complex") } } test("filter") { new TestSets { assert(contains(filter(u123, greaterThan2), 3), "filter 2") assert(!contains(filter(u123, greaterThan2), 1), "filter 1") } } test("forall") { new TestSets { assert(!forall(u123, greaterThan2)) assert(forall(u123, (x: Int) => x > 0)) } } test("exists") { new TestSets { def isNegative(x: Int) = x < 0 val u = union(u123, singletonSet(-1)) val um12 = union(u, singletonSet(-2)) assert(exists(u, isNegative), "negative number exist in set -1 1 2 3") assert(exists(um12, isNegative), "negative number exist in set -2 -1 1 2 3") assert(!exists(u123, isNegative), "negative number does not exist in set 1 2 3") } } test("map") { new TestSets { def mult2(x: Int) = x * 2 def sqr(x: Int) = x * x def u123mult2 = map(u123, mult2) def u123sqr = map(u123, sqr) assert(contains(u123mult2, 4), "a set 1 2 3 mult2 contains 4") assert(!contains(u123mult2, 1), "a set 1 2 3 mult2 does not contain 1") assert(contains(u123sqr, 4), "a set 1 2 3 squared contains 4") assert(!contains(u123sqr, 2), "a set 1 2 3 squared does not contain 2") } } }
panga/progfun-assignments
funsets/src/test/scala/funsets/FunSetSuite.scala
Scala
mit
4,903
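// The suite above exercises a FunSets implementation that is not included here. A minimal sketch
// of what such an implementation could look like follows (sets as characteristic functions
// Int => Boolean); it is an assumption, not necessarily the course author's code.
object FunSetsSketch {
  type Set = Int => Boolean
  def contains(s: Set, elem: Int): Boolean = s(elem)
  def singletonSet(elem: Int): Set = x => x == elem
  def union(s: Set, t: Set): Set = x => s(x) || t(x)
  def intersect(s: Set, t: Set): Set = x => s(x) && t(x)
  def diff(s: Set, t: Set): Set = x => s(x) && !t(x)
  def filter(s: Set, p: Int => Boolean): Set = x => s(x) && p(x)

  private val bound = 1000
  def forall(s: Set, p: Int => Boolean): Boolean = (-bound to bound).filter(s).forall(p)
  def exists(s: Set, p: Int => Boolean): Boolean = !forall(s, x => !p(x))
  def map(s: Set, f: Int => Int): Set = y => exists(s, x => f(x) == y)
}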
package org.allenai.plugins.archetypes import org.allenai.plugins.NodeJsPlugin import org.allenai.plugins.NodeJsPlugin.autoImport._ import org.allenai.plugins.DeployPlugin import com.typesafe.sbt.packager import com.typesafe.sbt.SbtNativePackager.Universal import com.typesafe.sbt.packager.MappingsHelper import com.typesafe.sbt.packager.universal.UniversalPlugin import spray.revolver.RevolverPlugin.Revolver import sbt._ import sbt.Keys._ /** Plugin that configures a webapp for building. This makes the `re-start`, `test`, and `clean` * tasks execute the appropriate node builds, configures node for deploy with the `deploy` command. */ object WebappPlugin extends AutoPlugin { override def requires: Plugins = WebServicePlugin && NodeJsPlugin && DeployPlugin override lazy val projectSettings: Seq[Setting[_]] = Seq( // Expect the node project in a "webapp" subdirectory. NodeKeys.nodeProjectDir in Npm := (baseDirectory in thisProject).value / "webapp", // Run "npm watch" when we run a re-start. Revolver.reStart <<= Revolver.reStart.dependsOn(NodeKeys.nwatch in Npm), // Kill background watches on re-stop. Revolver.reStop <<= Revolver.reStop.dependsOn(NodeKeys.unwatch in Npm), // Run client-side tests when tests are run. test in Test <<= (test in Test).dependsOn(test in Npm), // Clean node files on clean. cleanFiles += (NodeKeys.nodeProjectTarget in Npm).value, // Build the node project on stage (for deploys). UniversalPlugin.autoImport.stage <<= UniversalPlugin.autoImport.stage.dependsOn(DeployPlugin.npmBuildTask), // Copy the built node project into our staging directory, too! mappings in Universal <++= (NodeKeys.nodeProjectTarget in Npm) map MappingsHelper.directory ) }
ryanai3/sbt-plugins
src/main/scala/org/allenai/plugins/archetypes/WebappPlugin.scala
Scala
apache-2.0
1,770
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.server import kafka.network._ import kafka.utils._ import kafka.metrics.KafkaMetricsGroup import java.util.concurrent.{CountDownLatch, TimeUnit} import java.util.concurrent.atomic.AtomicInteger import com.yammer.metrics.core.Meter import org.apache.kafka.common.internals.FatalExitError import org.apache.kafka.common.utils.{KafkaThread, Time} import scala.collection.mutable import scala.jdk.CollectionConverters._ trait ApiRequestHandler { def handle(request: RequestChannel.Request, requestLocal: RequestLocal): Unit } /** * A thread that answers kafka requests. */ class KafkaRequestHandler(id: Int, brokerId: Int, val aggregateIdleMeter: Meter, val totalHandlerThreads: AtomicInteger, val requestChannel: RequestChannel, apis: ApiRequestHandler, time: Time) extends Runnable with Logging { this.logIdent = s"[Kafka Request Handler $id on Broker $brokerId], " private val shutdownComplete = new CountDownLatch(1) private val requestLocal = RequestLocal.withThreadConfinedCaching @volatile private var stopped = false def run(): Unit = { while (!stopped) { // We use a single meter for aggregate idle percentage for the thread pool. // Since meter is calculated as total_recorded_value / time_window and // time_window is independent of the number of threads, each recorded idle // time should be discounted by # threads. 
val startSelectTime = time.nanoseconds val req = requestChannel.receiveRequest(300) val endTime = time.nanoseconds val idleTime = endTime - startSelectTime aggregateIdleMeter.mark(idleTime / totalHandlerThreads.get) req match { case RequestChannel.ShutdownRequest => debug(s"Kafka request handler $id on broker $brokerId received shut down command") completeShutdown() return case request: RequestChannel.Request => try { request.requestDequeueTimeNanos = endTime trace(s"Kafka request handler $id on broker $brokerId handling request $request") apis.handle(request, requestLocal) } catch { case e: FatalExitError => completeShutdown() Exit.exit(e.statusCode) case e: Throwable => error("Exception when handling request", e) } finally { request.releaseBuffer() } case null => // continue } } completeShutdown() } private def completeShutdown(): Unit = { requestLocal.close() shutdownComplete.countDown() } def stop(): Unit = { stopped = true } def initiateShutdown(): Unit = requestChannel.sendShutdownRequest() def awaitShutdown(): Unit = shutdownComplete.await() } class KafkaRequestHandlerPool(val brokerId: Int, val requestChannel: RequestChannel, val apis: ApiRequestHandler, time: Time, numThreads: Int, requestHandlerAvgIdleMetricName: String, logAndThreadNamePrefix : String) extends Logging with KafkaMetricsGroup { private val threadPoolSize: AtomicInteger = new AtomicInteger(numThreads) /* a meter to track the average free capacity of the request handlers */ private val aggregateIdleMeter = newMeter(requestHandlerAvgIdleMetricName, "percent", TimeUnit.NANOSECONDS) this.logIdent = "[" + logAndThreadNamePrefix + " Kafka Request Handler on Broker " + brokerId + "], " val runnables = new mutable.ArrayBuffer[KafkaRequestHandler](numThreads) for (i <- 0 until numThreads) { createHandler(i) } def createHandler(id: Int): Unit = synchronized { runnables += new KafkaRequestHandler(id, brokerId, aggregateIdleMeter, threadPoolSize, requestChannel, apis, time) KafkaThread.daemon(logAndThreadNamePrefix + "-kafka-request-handler-" + id, runnables(id)).start() } def resizeThreadPool(newSize: Int): Unit = synchronized { val currentSize = threadPoolSize.get info(s"Resizing request handler thread pool size from $currentSize to $newSize") if (newSize > currentSize) { for (i <- currentSize until newSize) { createHandler(i) } } else if (newSize < currentSize) { for (i <- 1 to (currentSize - newSize)) { runnables.remove(currentSize - i).stop() } } threadPoolSize.set(newSize) } def shutdown(): Unit = synchronized { info("shutting down") for (handler <- runnables) handler.initiateShutdown() for (handler <- runnables) handler.awaitShutdown() info("shut down completely") } } class BrokerTopicMetrics(name: Option[String]) extends KafkaMetricsGroup { val tags: scala.collection.Map[String, String] = name match { case None => Map.empty case Some(topic) => Map("topic" -> topic) } case class MeterWrapper(metricType: String, eventType: String) { @volatile private var lazyMeter: Meter = _ private val meterLock = new Object def meter(): Meter = { var meter = lazyMeter if (meter == null) { meterLock synchronized { meter = lazyMeter if (meter == null) { meter = newMeter(metricType, eventType, TimeUnit.SECONDS, tags) lazyMeter = meter } } } meter } def close(): Unit = meterLock synchronized { if (lazyMeter != null) { removeMetric(metricType, tags) lazyMeter = null } } if (tags.isEmpty) // greedily initialize the general topic metrics meter() } // an internal map for "lazy initialization" of certain metrics private val metricTypeMap = new 
Pool[String, MeterWrapper]() metricTypeMap.putAll(Map( BrokerTopicStats.MessagesInPerSec -> MeterWrapper(BrokerTopicStats.MessagesInPerSec, "messages"), BrokerTopicStats.BytesInPerSec -> MeterWrapper(BrokerTopicStats.BytesInPerSec, "bytes"), BrokerTopicStats.BytesOutPerSec -> MeterWrapper(BrokerTopicStats.BytesOutPerSec, "bytes"), BrokerTopicStats.BytesRejectedPerSec -> MeterWrapper(BrokerTopicStats.BytesRejectedPerSec, "bytes"), BrokerTopicStats.FailedProduceRequestsPerSec -> MeterWrapper(BrokerTopicStats.FailedProduceRequestsPerSec, "requests"), BrokerTopicStats.FailedFetchRequestsPerSec -> MeterWrapper(BrokerTopicStats.FailedFetchRequestsPerSec, "requests"), BrokerTopicStats.TotalProduceRequestsPerSec -> MeterWrapper(BrokerTopicStats.TotalProduceRequestsPerSec, "requests"), BrokerTopicStats.TotalFetchRequestsPerSec -> MeterWrapper(BrokerTopicStats.TotalFetchRequestsPerSec, "requests"), BrokerTopicStats.FetchMessageConversionsPerSec -> MeterWrapper(BrokerTopicStats.FetchMessageConversionsPerSec, "requests"), BrokerTopicStats.ProduceMessageConversionsPerSec -> MeterWrapper(BrokerTopicStats.ProduceMessageConversionsPerSec, "requests"), BrokerTopicStats.NoKeyCompactedTopicRecordsPerSec -> MeterWrapper(BrokerTopicStats.NoKeyCompactedTopicRecordsPerSec, "requests"), BrokerTopicStats.InvalidMagicNumberRecordsPerSec -> MeterWrapper(BrokerTopicStats.InvalidMagicNumberRecordsPerSec, "requests"), BrokerTopicStats.InvalidMessageCrcRecordsPerSec -> MeterWrapper(BrokerTopicStats.InvalidMessageCrcRecordsPerSec, "requests"), BrokerTopicStats.InvalidOffsetOrSequenceRecordsPerSec -> MeterWrapper(BrokerTopicStats.InvalidOffsetOrSequenceRecordsPerSec, "requests") ).asJava) if (name.isEmpty) { metricTypeMap.put(BrokerTopicStats.ReplicationBytesInPerSec, MeterWrapper(BrokerTopicStats.ReplicationBytesInPerSec, "bytes")) metricTypeMap.put(BrokerTopicStats.ReplicationBytesOutPerSec, MeterWrapper(BrokerTopicStats.ReplicationBytesOutPerSec, "bytes")) metricTypeMap.put(BrokerTopicStats.ReassignmentBytesInPerSec, MeterWrapper(BrokerTopicStats.ReassignmentBytesInPerSec, "bytes")) metricTypeMap.put(BrokerTopicStats.ReassignmentBytesOutPerSec, MeterWrapper(BrokerTopicStats.ReassignmentBytesOutPerSec, "bytes")) } // used for testing only def metricMap: Map[String, MeterWrapper] = metricTypeMap.toMap def messagesInRate: Meter = metricTypeMap.get(BrokerTopicStats.MessagesInPerSec).meter() def bytesInRate: Meter = metricTypeMap.get(BrokerTopicStats.BytesInPerSec).meter() def bytesOutRate: Meter = metricTypeMap.get(BrokerTopicStats.BytesOutPerSec).meter() def bytesRejectedRate: Meter = metricTypeMap.get(BrokerTopicStats.BytesRejectedPerSec).meter() private[server] def replicationBytesInRate: Option[Meter] = if (name.isEmpty) Some(metricTypeMap.get(BrokerTopicStats.ReplicationBytesInPerSec).meter()) else None private[server] def replicationBytesOutRate: Option[Meter] = if (name.isEmpty) Some(metricTypeMap.get(BrokerTopicStats.ReplicationBytesOutPerSec).meter()) else None private[server] def reassignmentBytesInPerSec: Option[Meter] = if (name.isEmpty) Some(metricTypeMap.get(BrokerTopicStats.ReassignmentBytesInPerSec).meter()) else None private[server] def reassignmentBytesOutPerSec: Option[Meter] = if (name.isEmpty) Some(metricTypeMap.get(BrokerTopicStats.ReassignmentBytesOutPerSec).meter()) else None def failedProduceRequestRate: Meter = metricTypeMap.get(BrokerTopicStats.FailedProduceRequestsPerSec).meter() def failedFetchRequestRate: Meter = metricTypeMap.get(BrokerTopicStats.FailedFetchRequestsPerSec).meter() def 
totalProduceRequestRate: Meter = metricTypeMap.get(BrokerTopicStats.TotalProduceRequestsPerSec).meter() def totalFetchRequestRate: Meter = metricTypeMap.get(BrokerTopicStats.TotalFetchRequestsPerSec).meter() def fetchMessageConversionsRate: Meter = metricTypeMap.get(BrokerTopicStats.FetchMessageConversionsPerSec).meter() def produceMessageConversionsRate: Meter = metricTypeMap.get(BrokerTopicStats.ProduceMessageConversionsPerSec).meter() def noKeyCompactedTopicRecordsPerSec: Meter = metricTypeMap.get(BrokerTopicStats.NoKeyCompactedTopicRecordsPerSec).meter() def invalidMagicNumberRecordsPerSec: Meter = metricTypeMap.get(BrokerTopicStats.InvalidMagicNumberRecordsPerSec).meter() def invalidMessageCrcRecordsPerSec: Meter = metricTypeMap.get(BrokerTopicStats.InvalidMessageCrcRecordsPerSec).meter() def invalidOffsetOrSequenceRecordsPerSec: Meter = metricTypeMap.get(BrokerTopicStats.InvalidOffsetOrSequenceRecordsPerSec).meter() def closeMetric(metricType: String): Unit = { val meter = metricTypeMap.get(metricType) if (meter != null) meter.close() } def close(): Unit = metricTypeMap.values.foreach(_.close()) } object BrokerTopicStats { val MessagesInPerSec = "MessagesInPerSec" val BytesInPerSec = "BytesInPerSec" val BytesOutPerSec = "BytesOutPerSec" val BytesRejectedPerSec = "BytesRejectedPerSec" val ReplicationBytesInPerSec = "ReplicationBytesInPerSec" val ReplicationBytesOutPerSec = "ReplicationBytesOutPerSec" val FailedProduceRequestsPerSec = "FailedProduceRequestsPerSec" val FailedFetchRequestsPerSec = "FailedFetchRequestsPerSec" val TotalProduceRequestsPerSec = "TotalProduceRequestsPerSec" val TotalFetchRequestsPerSec = "TotalFetchRequestsPerSec" val FetchMessageConversionsPerSec = "FetchMessageConversionsPerSec" val ProduceMessageConversionsPerSec = "ProduceMessageConversionsPerSec" val ReassignmentBytesInPerSec = "ReassignmentBytesInPerSec" val ReassignmentBytesOutPerSec = "ReassignmentBytesOutPerSec" // These following topics are for LogValidator for better debugging on failed records val NoKeyCompactedTopicRecordsPerSec = "NoKeyCompactedTopicRecordsPerSec" val InvalidMagicNumberRecordsPerSec = "InvalidMagicNumberRecordsPerSec" val InvalidMessageCrcRecordsPerSec = "InvalidMessageCrcRecordsPerSec" val InvalidOffsetOrSequenceRecordsPerSec = "InvalidOffsetOrSequenceRecordsPerSec" private val valueFactory = (k: String) => new BrokerTopicMetrics(Some(k)) } class BrokerTopicStats extends Logging { import BrokerTopicStats._ private val stats = new Pool[String, BrokerTopicMetrics](Some(valueFactory)) val allTopicsStats = new BrokerTopicMetrics(None) def topicStats(topic: String): BrokerTopicMetrics = stats.getAndMaybePut(topic) def updateReplicationBytesIn(value: Long): Unit = { allTopicsStats.replicationBytesInRate.foreach { metric => metric.mark(value) } } private def updateReplicationBytesOut(value: Long): Unit = { allTopicsStats.replicationBytesOutRate.foreach { metric => metric.mark(value) } } def updateReassignmentBytesIn(value: Long): Unit = { allTopicsStats.reassignmentBytesInPerSec.foreach { metric => metric.mark(value) } } def updateReassignmentBytesOut(value: Long): Unit = { allTopicsStats.reassignmentBytesOutPerSec.foreach { metric => metric.mark(value) } } // This method only removes metrics only used for leader def removeOldLeaderMetrics(topic: String): Unit = { val topicMetrics = topicStats(topic) if (topicMetrics != null) { topicMetrics.closeMetric(BrokerTopicStats.MessagesInPerSec) topicMetrics.closeMetric(BrokerTopicStats.BytesInPerSec) 
topicMetrics.closeMetric(BrokerTopicStats.BytesRejectedPerSec) topicMetrics.closeMetric(BrokerTopicStats.FailedProduceRequestsPerSec) topicMetrics.closeMetric(BrokerTopicStats.TotalProduceRequestsPerSec) topicMetrics.closeMetric(BrokerTopicStats.ProduceMessageConversionsPerSec) topicMetrics.closeMetric(BrokerTopicStats.ReplicationBytesOutPerSec) topicMetrics.closeMetric(BrokerTopicStats.ReassignmentBytesOutPerSec) } } // This method only removes metrics only used for follower def removeOldFollowerMetrics(topic: String): Unit = { val topicMetrics = topicStats(topic) if (topicMetrics != null) { topicMetrics.closeMetric(BrokerTopicStats.ReplicationBytesInPerSec) topicMetrics.closeMetric(BrokerTopicStats.ReassignmentBytesInPerSec) } } def removeMetrics(topic: String): Unit = { val metrics = stats.remove(topic) if (metrics != null) metrics.close() } def updateBytesOut(topic: String, isFollower: Boolean, isReassignment: Boolean, value: Long): Unit = { if (isFollower) { if (isReassignment) updateReassignmentBytesOut(value) updateReplicationBytesOut(value) } else { topicStats(topic).bytesOutRate.mark(value) allTopicsStats.bytesOutRate.mark(value) } } def close(): Unit = { allTopicsStats.close() stats.values.foreach(_.close()) info("Broker and topic stats closed") } }
TiVo/kafka
core/src/main/scala/kafka/server/KafkaRequestHandler.scala
Scala
apache-2.0
15,527
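The loop comment in the KafkaRequestHandler record above explains why each handler thread records its idle time divided by the pool size into one shared meter. A standalone sketch of that arithmetic (not Kafka code; the thread count and timings are made-up numbers):

// Why per-thread idle time is discounted by the number of handler threads.
object IdleRatioSketch extends App {
  val totalThreads  = 4
  val windowNanos   = 1000L * 1000 * 1000   // a 1-second observation window
  val idlePerThread = 300L * 1000 * 1000    // each thread idle for 300 ms

  // Each thread marks idle / totalThreads, so the sum across all threads stays
  // within [0, windowNanos] and reads as a pool-wide idle fraction.
  val recordedTotal = (idlePerThread / totalThreads) * totalThreads
  println(recordedTotal.toDouble / windowNanos) // 0.3 => the pool was ~30% idle
}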
import sbt._
import Keys._

object BuildSettings {

  val VERSION = "0.1"

  lazy val basicSettings = seq(
    version := VERSION,
    organization := "org.hip",
    description := "Most Simple Discrete Event Simulation Engine for the JVM",
    startYear := Some(2014),
    scalaVersion := Dependencies.myScalaVersion,
    scalacOptions := Seq(
      "-encoding", "UTF-8",
      "-feature",
      "-unchecked",
      "-deprecation",
      "-language:_",
      "-optimize"
    ),
    parallelExecution in Test := false
  )

  lazy val msDeseSettings = basicSettings
}
MagnusAk78/ms-dese-jvm
project/BuildSettings.scala
Scala
mit
629
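A hedged sketch of how these settings might be consumed from the same sbt 0.13-era build definition; the Build object and project name below are assumptions, not taken from the repository.

// Hypothetical project/Build.scala fragment (sbt 0.13 style).
import sbt._
import Keys._

object MsDeseBuild extends Build {
  lazy val root = Project("ms-dese", file("."))
    .settings(BuildSettings.msDeseSettings: _*)
}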
/*
 * Databinder: a simple bridge from Wicket to Hibernate
 * Copyright (C) 2006 Nathan Hamblen [email protected]
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
package net.databinder.dispatch.components

import java.net.URI

import javax.servlet.http.HttpServletRequest

import org.apache.wicket.Component
import org.apache.wicket.ResourceReference
import org.apache.wicket.markup.html.WebPage
import org.apache.wicket.markup.html.basic.Label
import org.apache.wicket.markup.html.link.Link
import org.apache.wicket.markup.html.link.ResourceLink
import org.apache.wicket.model.Model
import org.apache.wicket.protocol.http.WebRequest

/**
 * Page describing connection problem and offering source for external script.
 * @author Nathan Hamblen
 */
class ConnectionErrorPage(e: Throwable) extends WebPage {
  def << (c: Component) = add(c)

  this << new Label("error", new Model(e.getMessage()))

  this << new Link("retry") {
    override def onClick() {
      continueToOriginalDestination()
    }
  }

  this << new ResourceLink("script", ConnectionErrorPage.scriptFile)

  def req = getRequest.asInstanceOf[WebRequest].getHttpServletRequest
  val full = URI.create(req.getRequestURL().toString())
  val path = new Model(full.resolve(req.getContextPath() + "/" + urlFor(ConnectionErrorPage.scriptFile)))
  this << new Label("href", path).setRenderBodyOnly(true)
}

object ConnectionErrorPage {
  val scriptFile = new ResourceReference(classOf[ConnectionErrorPage], "databinder-dispatch.rb")
}
n8han/Databinder-for-Wicket
databinder-dispatch-components/src/main/scala/net/databinder/dispatch/components/ConnectionErrorPage.scala
Scala
lgpl-2.1
2,190
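For clarity, a small sketch of the URL composition ConnectionErrorPage performs with URI.resolve; the host, context path, and resolved resource path below are illustrative values, not taken from the project.

import java.net.URI

object UriResolveSketch extends App {
  // Pretend the error page was requested at this URL in a webapp
  // deployed under the "/app" servlet context path.
  val requestUrl  = URI.create("http://localhost:8080/app/some/page")
  val contextPath = "/app"                             // req.getContextPath()
  val scriptPath  = "resources/databinder-dispatch.rb" // stand-in for urlFor(...)

  // Resolving an absolute path keeps the scheme and authority and replaces
  // the path, which yields the externally reachable script URL.
  println(requestUrl.resolve(contextPath + "/" + scriptPath))
  // http://localhost:8080/app/resources/databinder-dispatch.rb
}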
package eu.gruchala.typelevel.full

object B_DependentTypes {

  val desc = """
    |A dependent type is a type that depends on a value.
    |In languages like Java we use types (and define them upfront)
    |to give us information about values and to put constraints on them.
    |With Dependent Types we are more flexible because we can compute types
    |and define stronger constraints on values.
  """.stripMargin

  object PathDependentTypes {

    class Foo {
      class Bar
    }

    val foo1 = new Foo
    val foo2 = new Foo

    //# means any Bar
    val a: Foo#Bar = new foo1.Bar
    val b: Foo#Bar = new foo2.Bar

    // . means Bar from given instance / path
    val c: foo1.Bar = new foo1.Bar
    // val d: foo1.Bar = new foo2.Bar // can't assign foo2.Bar type to foo1.Bar
  }

  object ParameterDependentTypes {

    trait Foo {
      type Bar
      def value: Bar
    }

    def foo(f: Foo): f.Bar = f.value
  }
}
leszekgruchala/typelevel-programming-scala
src/main/scala/eu/gruchala/typelevel/full/B_DependentTypes.scala
Scala
mit
949
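A short usage sketch for the ParameterDependentTypes example above; IntFoo and the enclosing object are illustrative additions, not part of the original file.

object DependentTypesUsage {
  import eu.gruchala.typelevel.full.B_DependentTypes.ParameterDependentTypes._

  object IntFoo extends Foo {
    type Bar = Int
    def value: Bar = 42
  }

  // The result type depends on the argument's path:
  // foo(IntFoo) is statically typed as IntFoo.Bar, i.e. Int.
  val n: IntFoo.Bar = foo(IntFoo)
  val m: Int = n
}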
package chk.commons.util

import scala.concurrent.Future
import scala.language.implicitConversions
import scala.reflect.ClassTag

trait AnyUtils {

  implicit class AnyRefOps[T <: AnyRef](underlying: T) {

    /**
     * Check if the instance is null
     *
     * @return true if null, false otherwise.
     */
    def isNull: Boolean = underlying eq null
  }

  implicit class AnyOps[T <: Any](underlying: T)(implicit tag: ClassTag[T]) {

    /**
     * Wrap to Option
     * @return
     */
    def option: Option[T] = Option(underlying)

    /**
     * Wrap to Future
     * @return
     */
    def future: Future[T] = Future.successful(underlying)

    /**
     * Helper for chaining function calls, just like the Unix tee command. It always returns the object it's called on.
     *
     * @param f block to execute on the object
     * @return the object it's called on
     */
    def tap(f: T => Unit): T = {
      f(underlying)
      underlying
    }

    /** alias of tap */
    def >|(f: T => Unit): T = tap(f)

    /** check null before calling tap */
    def tapNotNull(f: T => Unit): T = {
      if (underlying != null) tap(f) else underlying
    }

    /** alias of tapNotNull */
    def ?>|(f: T => Unit): T = tapNotNull(f)

    /**
     * Helper for chaining function calls, just like a Unix pipeline.
     *
     * @param f block to execute on the object
     * @return the result of applying the block to the object
     */
    def pipe[That](f: T => That): That = f(underlying)

    /** alias of pipe */
    def |[That](f: T => That): That = pipe(f)
  }

  implicit def anyToOption[T <: Any](underlying: T): Option[T] = Option(underlying)

  implicit def anyToFuture[T <: Any](underlying: T): Future[T] = Future.successful(underlying)

  implicit def anyToString[T <: Any](underlying: T): String = underlying.toString
}
chainkite/chk-commons
core/src/main/scala/chk/commons/util/AnyUtils.scala
Scala
mit
1,823
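An illustrative usage of the tap/pipe helpers defined above; the MyUtils and Example objects are hypothetical, only the AnyUtils trait comes from the file.

object MyUtils extends chk.commons.util.AnyUtils

object Example extends App {
  import MyUtils._

  val doubled = List(1, 2, 3)
    .tap(xs => println(s"got ${xs.size} elements")) // side effect, returns the list unchanged
    .pipe(_.map(_ * 2))                             // transform, like a Unix pipeline

  println(doubled) // List(2, 4, 6)
}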
package io.vamp.operation.sla import java.time.OffsetDateTime import akka.actor._ import io.vamp.common.akka.IoC._ import io.vamp.common.akka._ import io.vamp.model.artifact.DeploymentService.Status.Phase.Initiated import io.vamp.model.artifact._ import io.vamp.model.event.{ Event, EventQuery, TimeRange } import io.vamp.model.notification.{ DeEscalate, Escalate, SlaEvent } import io.vamp.model.reader.{ MegaByte, Quantity } import io.vamp.operation.notification.{ InternalServerError, OperationNotificationProvider, UnsupportedEscalationType } import io.vamp.operation.sla.EscalationActor.EscalationProcessAll import io.vamp.persistence.{ ArtifactPaginationSupport, EventPaginationSupport, PersistenceActor } import io.vamp.pulse.PulseActor import scala.concurrent.Future import scala.concurrent.duration.FiniteDuration class EscalationSchedulerActor extends SchedulerActor with OperationNotificationProvider { private var windowStart: Option[OffsetDateTime] = None def tick() = windowStart match { case Some(from) β‡’ val to = OffsetDateTime.now().withNano(0) actorFor[EscalationActor] ! EscalationProcessAll(from, to) windowStart = Some(to) case None β‡’ } override def schedule(period: FiniteDuration, initialDelay: FiniteDuration) = { period.toNanos match { case interval if interval > 0 β‡’ if (windowStart.isEmpty) windowStart = Some(OffsetDateTime.now().withNano(0)) case _ β‡’ windowStart = None } super.schedule(period, initialDelay) } } object EscalationActor { case class EscalationProcessAll(from: OffsetDateTime, to: OffsetDateTime) } class EscalationActor extends ArtifactPaginationSupport with EventPaginationSupport with CommonSupportForActors with OperationNotificationProvider { def tags = Set("escalation") def receive: Receive = { case EscalationProcessAll(from, to) β‡’ implicit val timeout = PersistenceActor.timeout() forAll(allArtifacts[Deployment], check(from, to)) } private def check(from: OffsetDateTime, to: OffsetDateTime)(deployments: List[Deployment]): Unit = { def escalation(deployment: Deployment, cluster: DeploymentCluster, sla: Sla, escalate: Boolean) = { escalateToOne(deployment, cluster, ToOneEscalation("", Map(), sla.escalations), escalate) match { case Some(d) β‡’ actorFor[PersistenceActor] ! 
PersistenceActor.Update(d) case _ β‡’ } } deployments.foreach(deployment β‡’ { try { deployment.clusters.foreach(cluster β‡’ cluster.sla match { case None β‡’ case Some(sla) β‡’ forEach(querySlaEvents(deployment, cluster, from, to), { case Escalate(d, c, _) if d.name == deployment.name && c.name == cluster.name β‡’ escalation(deployment, cluster, sla, escalate = true) case DeEscalate(d, c, _) if d.name == deployment.name && c.name == cluster.name β‡’ escalation(deployment, cluster, sla, escalate = false) case _ β‡’ }: (SlaEvent) β‡’ Unit) }) } catch { case any: Throwable β‡’ reportException(InternalServerError(any)) } }) } private def querySlaEvents(deployment: Deployment, cluster: DeploymentCluster, from: OffsetDateTime, to: OffsetDateTime): Future[Stream[Future[List[SlaEvent]]]] = { implicit val timeout = PulseActor.timeout() val eventQuery = EventQuery(SlaEvent.slaTags(deployment, cluster), None, Some(TimeRange(Some(from), Some(to), includeLower = false, includeUpper = true))) collectEach[Event, SlaEvent](allEvents(eventQuery), { case event if Escalate.tags.forall(event.tags.contains) β‡’ Escalate(deployment, cluster, event.timestamp) case event if DeEscalate.tags.forall(event.tags.contains) β‡’ DeEscalate(deployment, cluster, event.timestamp) }) } private def escalateToAll(deployment: Deployment, cluster: DeploymentCluster, escalations: List[Escalation], escalate: Boolean): Option[Deployment] = { log.debug(s"to all escalation: ${deployment.name}/${cluster.name}") escalations.foldLeft[Option[Deployment]](None)((op1, op2) β‡’ op1 match { case Some(d) β‡’ escalateToOne(d, cluster, op2, escalate) case None β‡’ escalateToOne(deployment, cluster, op2, escalate) }) } private def escalateToOne(deployment: Deployment, cluster: DeploymentCluster, escalation: Escalation, escalate: Boolean): Option[Deployment] = { log.debug(s"to one escalation: ${deployment.name}/${cluster.name}") escalation match { case e: ToAllEscalation β‡’ escalateToAll(deployment, cluster, e.escalations, escalate) case e: ToOneEscalation β‡’ (if (escalate) e.escalations else e.escalations.reverse).foldLeft[Option[Deployment]](None)((op1, op2) β‡’ if (op1.isDefined) op1 else escalateToOne(deployment, cluster, op2, escalate)) case e: ScaleEscalation[_] β‡’ scaleEscalation(deployment, cluster, e, escalate) case e: GenericEscalation β‡’ info(UnsupportedEscalationType(e.`type`)) None case e: Escalation β‡’ throwException(UnsupportedEscalationType(e.name)) } } private def scaleEscalation(deployment: Deployment, cluster: DeploymentCluster, escalation: ScaleEscalation[_], escalate: Boolean): Option[Deployment] = { log.debug(s"scale escalation: ${deployment.name}/${cluster.name}") def commit(targetCluster: DeploymentCluster, scale: DefaultScale): Option[Deployment] = { // Scale only the first service. Option(deployment.copy(clusters = deployment.clusters.map(c β‡’ { if (c.name == targetCluster.name) c.copy(services = c.services match { case head :: tail β‡’ head.copy(scale = Some(scale), status = head.status.copy(phase = Initiated())) :: tail case Nil β‡’ Nil }) else c }))) } (escalation.targetCluster match { case None β‡’ deployment.clusters.find(_.name == cluster.name) case Some(name) β‡’ deployment.clusters.find(_.name == name) match { case None β‡’ None case Some(c) β‡’ Some(c) } }) match { case None β‡’ None case Some(targetCluster) β‡’ // Scale only the first service. 
val scale = targetCluster.services.head.scale.get escalation match { case ScaleInstancesEscalation(_, _, minimum, maximum, scaleBy, _) β‡’ val instances = if (escalate) scale.instances + scaleBy else scale.instances - scaleBy if (instances <= maximum && instances >= minimum) { log.info(s"scale instances: ${deployment.name}/${targetCluster.name} to $instances") commit(targetCluster, scale.copy(instances = instances.toInt)) } else { log.debug(s"scale instances not within boundaries: ${deployment.name}/${targetCluster.name} is already ${scale.instances}") None } case ScaleCpuEscalation(_, _, minimum, maximum, scaleBy, _) β‡’ val cpu = if (escalate) scale.cpu.value + scaleBy else scale.cpu.value - scaleBy if (cpu <= maximum && cpu >= minimum) { log.info(s"scale cpu: ${deployment.name}/${targetCluster.name} to $cpu") commit(targetCluster, scale.copy(cpu = Quantity(cpu))) } else { log.debug(s"scale cpu not within boundaries: ${deployment.name}/${targetCluster.name} is already ${scale.cpu}") None } case ScaleMemoryEscalation(_, _, minimum, maximum, scaleBy, _) β‡’ val memory = if (escalate) scale.memory.value + scaleBy else scale.memory.value - scaleBy if (memory <= maximum && memory >= minimum) { log.info(s"scale memory: ${deployment.name}/${targetCluster.name} to $memory") commit(targetCluster, scale.copy(memory = MegaByte(memory))) } else { log.debug(s"scale memory not within boundaries: ${deployment.name}/${targetCluster.name} is already ${scale.memory}") None } } } } }
magneticio/vamp
operation/src/main/scala/io/vamp/operation/sla/EscalationActor.scala
Scala
apache-2.0
8,170
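The scaleEscalation logic in the EscalationActor record above only commits a new scale when the adjusted value stays inside the escalation's minimum/maximum bounds. A standalone sketch of that rule (not Vamp code; the numbers are made up):

object BoundedScaleSketch extends App {
  // Returns the new instance count, or None when the change would leave the allowed range.
  def nextInstances(current: Int, scaleBy: Int, min: Int, max: Int, escalate: Boolean): Option[Int] = {
    val target = if (escalate) current + scaleBy else current - scaleBy
    if (target >= min && target <= max) Some(target) else None
  }

  println(nextInstances(current = 2, scaleBy = 1, min = 1, max = 4, escalate = true))  // Some(3)
  println(nextInstances(current = 4, scaleBy = 1, min = 1, max = 4, escalate = true))  // None (upper bound)
  println(nextInstances(current = 1, scaleBy = 1, min = 1, max = 4, escalate = false)) // None (lower bound)
}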
package io.prediction.examples.experimental.trimapp

import io.prediction.controller.PPreparator
import io.prediction.data.storage.Event
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD

/*
class Preparator extends PPreparator[TrainingData, PreparedData] {
  def prepare(sc: SparkContext, trainingData: TrainingData): PreparedData = {
    new PreparedData(events = trainingData.events)
  }
}

class PreparedData(
  val events: RDD[Event]
) extends Serializable
*/
ch33hau/PredictionIO
examples/experimental/scala-parallel-trim-app/src/main/scala/Preparator.scala
Scala
apache-2.0
527
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql import java.beans.Introspector import java.util.Properties import java.util.concurrent.atomic.AtomicReference import org.apache.spark.SparkContext import org.apache.spark.annotation.{DeveloperApi, Experimental} import org.apache.spark.api.java.{JavaRDD, JavaSparkContext} import org.apache.spark.rdd.RDD import org.apache.spark.sql.catalyst.analysis._ import org.apache.spark.sql.catalyst.errors.DialectException import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.optimizer.{DefaultOptimizer, Optimizer} import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan} import org.apache.spark.sql.catalyst.rules.{Rule, RuleExecutor} import org.apache.spark.sql.catalyst.{ParserDialect, ScalaReflection, _} import org.apache.spark.sql.execution.joins.BroadcastHashJoin import org.apache.spark.sql.execution.{Filter, _} import org.apache.spark.sql.sources._ import org.apache.spark.sql.types._ import org.apache.spark.sql.ui.{SQLListener, SQLTab} import org.apache.spark.util.Utils import scala.collection.JavaConversions._ import scala.collection.immutable import scala.language.implicitConversions import scala.reflect.runtime.universe.TypeTag import scala.util.control.NonFatal /** * The entry point for working with structured data (rows and columns) in Spark. Allows the * creation of [[DataFrame]] objects as well as the execution of SQL queries. * * @groupname basic Basic Operations * @groupname ddl_ops Persistent Catalog DDL * @groupname cachemgmt Cached Table Management * @groupname genericdata Generic Data Sources * @groupname specificdata Specific Data Sources * @groupname config Configuration * @groupname dataframes Custom DataFrame Creation * @groupname Ungrouped Support functions for language integrated queries. * * @since 1.0.0 */ class SQLContext(@transient val sparkContext: SparkContext) extends org.apache.spark.Logging with Serializable { self => def this(sparkContext: JavaSparkContext) = this(sparkContext.sc) /** * @return Spark SQL configuration */ protected[sql] def conf = currentSession().conf // `listener` should be only used in the driver @transient private[sql] val listener = new SQLListener(this) sparkContext.addSparkListener(listener) sparkContext.ui.foreach(new SQLTab(this, _)) /** * Set Spark SQL configuration properties. * * @group config * @since 1.0.0 */ def setConf(props: Properties): Unit = conf.setConf(props) /** * Set the given Spark SQL configuration property. * * @group config * @since 1.0.0 */ def setConf(key: String, value: String): Unit = conf.setConf(key, value) /** * Return the value of Spark SQL configuration property for the given key. 
* * @group config * @since 1.0.0 */ def getConf(key: String): String = conf.getConf(key) /** * Return the value of Spark SQL configuration property for the given key. If the key is not set * yet, return `defaultValue`. * * @group config * @since 1.0.0 */ def getConf(key: String, defaultValue: String): String = conf.getConf(key, defaultValue) /** * Return all the configuration properties that have been set (i.e. not the default). * This creates a new copy of the config properties in the form of a Map. * * @group config * @since 1.0.0 */ def getAllConfs: immutable.Map[String, String] = conf.getAllConfs // TODO how to handle the temp table per user session? @transient protected[sql] lazy val catalog: Catalog = new SimpleCatalog(conf) // TODO how to handle the temp function per user session? @transient protected[sql] lazy val functionRegistry: FunctionRegistry = new SimpleFunctionRegistry(conf) @transient protected[sql] lazy val analyzer: Analyzer = new Analyzer(catalog, functionRegistry, conf) { override val extendedResolutionRules = ExtractPythonUdfs :: sources.PreInsertCastAndRename :: Nil override val extendedCheckRules = Seq( sources.PreWriteCheck(catalog) ) } @transient protected[sql] lazy val optimizer: Optimizer = DefaultOptimizer @transient protected[sql] val ddlParser = new DDLParser(sqlParser.parse(_)) @transient protected[sql] val sqlParser = new SparkSQLParser(getSQLDialect().parse(_)) protected[sql] def getSQLDialect(): ParserDialect = { try { val clazz = Utils.classForName(dialectClassName) clazz.newInstance().asInstanceOf[ParserDialect] } catch { case NonFatal(e) => // Since we didn't find the available SQL Dialect, it will fail even for SET command: // SET spark.sql.dialect=sql; Let's reset as default dialect automatically. val dialect = conf.dialect // reset the sql dialect conf.unsetConf(SQLConf.DIALECT) // throw out the exception, and the default sql dialect will take effect for next query. throw new DialectException( s"""Instantiating dialect '$dialect' failed. |Reverting to default dialect '${conf.dialect}'""".stripMargin, e) } } protected[sql] def parseSql(sql: String): LogicalPlan = ddlParser.parse(sql, false) protected[sql] def executeSql(sql: String): this.QueryExecution = executePlan(parseSql(sql)) protected[sql] def executePlan(plan: LogicalPlan) = new this.QueryExecution(plan) @transient protected[sql] val tlSession = new ThreadLocal[SQLSession]() { override def initialValue: SQLSession = defaultSession } @transient protected[sql] val defaultSession = createSession() protected[sql] def dialectClassName = if (conf.dialect == "sql") { classOf[DefaultParserDialect].getCanonicalName } else { conf.dialect } { // We extract spark sql settings from SparkContext's conf and put them to // Spark SQL's conf. // First, we populate the SQLConf (conf). So, we can make sure that other values using // those settings in their construction can get the correct settings. // For example, metadataHive in HiveContext may need both spark.sql.hive.metastore.version // and spark.sql.hive.metastore.jars to get correctly constructed. val properties = new Properties sparkContext.getConf.getAll.foreach { case (key, value) if key.startsWith("spark.sql") => properties.setProperty(key, value) case _ => } // We directly put those settings to conf to avoid of calling setConf, which may have // side-effects. For example, in HiveContext, setConf may cause executionHive and metadataHive // get constructed. 
If we call setConf directly, the constructed metadataHive may have // wrong settings, or the construction may fail. conf.setConf(properties) // After we have populated SQLConf, we call setConf to populate other confs in the subclass // (e.g. hiveconf in HiveContext). properties.foreach { case (key, value) => setConf(key, value) } } @transient protected[sql] val cacheManager = new CacheManager(this) /** * :: Experimental :: * A collection of methods that are considered experimental, but can be used to hook into * the query planner for advanced functionality. * * @group basic * @since 1.3.0 */ @Experimental @transient val experimental: ExperimentalMethods = new ExperimentalMethods(this) /** * :: Experimental :: * Returns a [[DataFrame]] with no rows or columns. * * @group basic * @since 1.3.0 */ @Experimental @transient lazy val emptyDataFrame: DataFrame = createDataFrame(sparkContext.emptyRDD[Row], StructType(Nil)) /** * A collection of methods for registering user-defined functions (UDF). * * The following example registers a Scala closure as UDF: * {{{ * sqlContext.udf.register("myUdf", (arg1: Int, arg2: String) => arg2 + arg1) * }}} * * The following example registers a UDF in Java: * {{{ * sqlContext.udf().register("myUDF", * new UDF2<Integer, String, String>() { * @Override * public String call(Integer arg1, String arg2) { * return arg2 + arg1; * } * }, DataTypes.StringType); * }}} * * Or, to use Java 8 lambda syntax: * {{{ * sqlContext.udf().register("myUDF", * (Integer arg1, String arg2) -> arg2 + arg1, * DataTypes.StringType); * }}} * * @group basic * @since 1.3.0 * TODO move to SQLSession? */ @transient val udf: UDFRegistration = new UDFRegistration(this) /** * Returns true if the table is currently cached in-memory. * @group cachemgmt * @since 1.3.0 */ def isCached(tableName: String): Boolean = cacheManager.isCached(tableName) /** * Caches the specified table in-memory. * @group cachemgmt * @since 1.3.0 */ def cacheTable(tableName: String): Unit = cacheManager.cacheTable(tableName) /** * Removes the specified table from the in-memory cache. * @group cachemgmt * @since 1.3.0 */ def uncacheTable(tableName: String): Unit = cacheManager.uncacheTable(tableName) /** * Removes all cached tables from the in-memory cache. * @since 1.3.0 */ def clearCache(): Unit = cacheManager.clearCache() // scalastyle:off // Disable style checker so "implicits" object can start with lowercase i /** * :: Experimental :: * (Scala-specific) Implicit methods available in Scala for converting * common Scala objects into [[DataFrame]]s. * * {{{ * val sqlContext = new SQLContext(sc) * import sqlContext.implicits._ * }}} * * @group basic * @since 1.3.0 */ @Experimental object implicits extends Serializable { // scalastyle:on /** * Converts $"col name" into an [[Column]]. * @since 1.3.0 */ implicit class StringToColumn(val sc: StringContext) { def $(args: Any*): ColumnName = { new ColumnName(sc.s(args : _*)) } } /** * An implicit conversion that turns a Scala `Symbol` into a [[Column]]. * @since 1.3.0 */ implicit def symbolToColumn(s: Symbol): ColumnName = new ColumnName(s.name) /** * Creates a DataFrame from an RDD of case classes or tuples. * @since 1.3.0 */ implicit def rddToDataFrameHolder[A <: Product : TypeTag](rdd: RDD[A]): DataFrameHolder = { DataFrameHolder(self.createDataFrame(rdd)) } /** * Creates a DataFrame from a local Seq of Product. 
* @since 1.3.0 */ implicit def localSeqToDataFrameHolder[A <: Product : TypeTag](data: Seq[A]): DataFrameHolder = { DataFrameHolder(self.createDataFrame(data)) } // Do NOT add more implicit conversions. They are likely to break source compatibility by // making existing implicit conversions ambiguous. In particular, RDD[Double] is dangerous // because of [[DoubleRDDFunctions]]. /** * Creates a single column DataFrame from an RDD[Int]. * @since 1.3.0 */ implicit def intRddToDataFrameHolder(data: RDD[Int]): DataFrameHolder = { val dataType = IntegerType val rows = data.mapPartitions { iter => val row = new SpecificMutableRow(dataType :: Nil) iter.map { v => row.setInt(0, v) row: Row } } DataFrameHolder(self.createDataFrame(rows, StructType(StructField("_1", dataType) :: Nil))) } /** * Creates a single column DataFrame from an RDD[Long]. * @since 1.3.0 */ implicit def longRddToDataFrameHolder(data: RDD[Long]): DataFrameHolder = { val dataType = LongType val rows = data.mapPartitions { iter => val row = new SpecificMutableRow(dataType :: Nil) iter.map { v => row.setLong(0, v) row: Row } } DataFrameHolder(self.createDataFrame(rows, StructType(StructField("_1", dataType) :: Nil))) } /** * Creates a single column DataFrame from an RDD[String]. * @since 1.3.0 */ implicit def stringRddToDataFrameHolder(data: RDD[String]): DataFrameHolder = { val dataType = StringType val rows = data.mapPartitions { iter => val row = new SpecificMutableRow(dataType :: Nil) iter.map { v => row.setString(0, v) row: Row } } DataFrameHolder(self.createDataFrame(rows, StructType(StructField("_1", dataType) :: Nil))) } } /** * :: Experimental :: * Creates a DataFrame from an RDD of case classes. * * @group dataframes * @since 1.3.0 */ @Experimental def createDataFrame[A <: Product : TypeTag](rdd: RDD[A]): DataFrame = { SparkPlan.currentContext.set(self) val schema = ScalaReflection.schemaFor[A].dataType.asInstanceOf[StructType] val attributeSeq = schema.toAttributes val rowRDD = RDDConversions.productToRowRdd(rdd, schema.map(_.dataType)) DataFrame(self, LogicalRDD(attributeSeq, rowRDD)(self)) } /** * :: Experimental :: * Creates a DataFrame from a local Seq of Product. * * @group dataframes * @since 1.3.0 */ @Experimental def createDataFrame[A <: Product : TypeTag](data: Seq[A]): DataFrame = { SparkPlan.currentContext.set(self) val schema = ScalaReflection.schemaFor[A].dataType.asInstanceOf[StructType] val attributeSeq = schema.toAttributes DataFrame(self, LocalRelation.fromProduct(attributeSeq, data)) } /** * Convert a [[BaseRelation]] created for external data sources into a [[DataFrame]]. * * @group dataframes * @since 1.3.0 */ def baseRelationToDataFrame(baseRelation: BaseRelation): DataFrame = { DataFrame(this, LogicalRelation(baseRelation)) } /** * :: DeveloperApi :: * Creates a [[DataFrame]] from an [[RDD]] containing [[Row]]s using the given schema. * It is important to make sure that the structure of every [[Row]] of the provided RDD matches * the provided schema. Otherwise, there will be runtime exception. 
* Example: * {{{ * import org.apache.spark.sql._ * import org.apache.spark.sql.types._ * val sqlContext = new org.apache.spark.sql.SQLContext(sc) * * val schema = * StructType( * StructField("name", StringType, false) :: * StructField("age", IntegerType, true) :: Nil) * * val people = * sc.textFile("examples/src/main/resources/people.txt").map( * _.split(",")).map(p => Row(p(0), p(1).trim.toInt)) * val dataFrame = sqlContext.createDataFrame(people, schema) * dataFrame.printSchema * // root * // |-- name: string (nullable = false) * // |-- age: integer (nullable = true) * * dataFrame.registerTempTable("people") * sqlContext.sql("select name from people").collect.foreach(println) * }}} * * @group dataframes * @since 1.3.0 */ @DeveloperApi def createDataFrame(rowRDD: RDD[Row], schema: StructType): DataFrame = { createDataFrame(rowRDD, schema, needsConversion = true) } /** * Creates a DataFrame from an RDD[Row]. User can specify whether the input rows should be * converted to Catalyst rows. */ private[sql] def createDataFrame(rowRDD: RDD[Row], schema: StructType, needsConversion: Boolean) = { // TODO: use MutableProjection when rowRDD is another DataFrame and the applied // schema differs from the existing schema on any field data type. val catalystRows = if (needsConversion) { val converter = CatalystTypeConverters.createToCatalystConverter(schema) rowRDD.map(converter(_).asInstanceOf[Row]) } else { rowRDD } val logicalPlan = LogicalRDD(schema.toAttributes, catalystRows)(self) DataFrame(this, logicalPlan) } /** * :: DeveloperApi :: * Creates a [[DataFrame]] from an [[JavaRDD]] containing [[Row]]s using the given schema. * It is important to make sure that the structure of every [[Row]] of the provided RDD matches * the provided schema. Otherwise, there will be runtime exception. * * @group dataframes * @since 1.3.0 */ @DeveloperApi def createDataFrame(rowRDD: JavaRDD[Row], schema: StructType): DataFrame = { createDataFrame(rowRDD.rdd, schema) } /** * Applies a schema to an RDD of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * @group dataframes * @since 1.3.0 */ def createDataFrame(rdd: RDD[_], beanClass: Class[_]): DataFrame = { val attributeSeq = getSchema(beanClass) val className = beanClass.getName val rowRdd = rdd.mapPartitions { iter => // BeanInfo is not serializable so we must rediscover it remotely for each partition. val localBeanInfo = Introspector.getBeanInfo( Class.forName(className, true, Utils.getContextOrSparkClassLoader)) val extractors = localBeanInfo.getPropertyDescriptors.filterNot(_.getName == "class").map(_.getReadMethod) iter.map { row => new GenericRow( extractors.zip(attributeSeq).map { case (e, attr) => CatalystTypeConverters.convertToCatalyst(e.invoke(row), attr.dataType) }.toArray[Any] ) : Row } } DataFrame(this, LogicalRDD(attributeSeq, rowRdd)(this)) } /** * Applies a schema to an RDD of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * @group dataframes * @since 1.3.0 */ def createDataFrame(rdd: JavaRDD[_], beanClass: Class[_]): DataFrame = { createDataFrame(rdd.rdd, beanClass) } /** * :: Experimental :: * Returns a [[DataFrameReader]] that can be used to read data in as a [[DataFrame]]. 
* {{{ * sqlContext.read.parquet("/path/to/file.parquet") * sqlContext.read.schema(schema).json("/path/to/file.json") * }}} * * @group genericdata * @since 1.4.0 */ @Experimental def read: DataFrameReader = new DataFrameReader(this) /** * :: Experimental :: * Creates an external table from the given path and returns the corresponding DataFrame. * It will use the default data source configured by spark.sql.sources.default. * * @group ddl_ops * @since 1.3.0 */ @Experimental def createExternalTable(tableName: String, path: String): DataFrame = { val dataSourceName = conf.defaultDataSourceName createExternalTable(tableName, path, dataSourceName) } /** * :: Experimental :: * Creates an external table from the given path based on a data source * and returns the corresponding DataFrame. * * @group ddl_ops * @since 1.3.0 */ @Experimental def createExternalTable( tableName: String, path: String, source: String): DataFrame = { createExternalTable(tableName, source, Map("path" -> path)) } /** * :: Experimental :: * Creates an external table from the given path based on a data source and a set of options. * Then, returns the corresponding DataFrame. * * @group ddl_ops * @since 1.3.0 */ @Experimental def createExternalTable( tableName: String, source: String, options: java.util.Map[String, String]): DataFrame = { createExternalTable(tableName, source, options.toMap) } /** * :: Experimental :: * (Scala-specific) * Creates an external table from the given path based on a data source and a set of options. * Then, returns the corresponding DataFrame. * * @group ddl_ops * @since 1.3.0 */ @Experimental def createExternalTable( tableName: String, source: String, options: Map[String, String]): DataFrame = { val cmd = CreateTableUsing( tableName, userSpecifiedSchema = None, source, temporary = false, options, allowExisting = false, managedIfNoPath = false) executePlan(cmd).toRdd table(tableName) } /** * :: Experimental :: * Create an external table from the given path based on a data source, a schema and * a set of options. Then, returns the corresponding DataFrame. * * @group ddl_ops * @since 1.3.0 */ @Experimental def createExternalTable( tableName: String, source: String, schema: StructType, options: java.util.Map[String, String]): DataFrame = { createExternalTable(tableName, source, schema, options.toMap) } /** * :: Experimental :: * (Scala-specific) * Create an external table from the given path based on a data source, a schema and * a set of options. Then, returns the corresponding DataFrame. * * @group ddl_ops * @since 1.3.0 */ @Experimental def createExternalTable( tableName: String, source: String, schema: StructType, options: Map[String, String]): DataFrame = { val cmd = CreateTableUsing( tableName, userSpecifiedSchema = Some(schema), source, temporary = false, options, allowExisting = false, managedIfNoPath = false) executePlan(cmd).toRdd table(tableName) } /** * Registers the given [[DataFrame]] as a temporary table in the catalog. Temporary tables exist * only during the lifetime of this instance of SQLContext. */ private[sql] def registerDataFrameAsTable(df: DataFrame, tableName: String): Unit = { catalog.registerTable(Seq(tableName), df.logicalPlan) } /** * Drops the temporary table with the given table name in the catalog. If the table has been * cached/persisted before, it's also unpersisted. * * @param tableName the name of the table to be unregistered. 
* * @group basic * @since 1.3.0 */ def dropTempTable(tableName: String): Unit = { cacheManager.tryUncacheQuery(table(tableName)) catalog.unregisterTable(Seq(tableName)) } /** * :: Experimental :: * Creates a [[DataFrame]] with a single [[LongType]] column named `id`, containing elements * in an range from `start` to `end`(exclusive) with step value 1. * * @since 1.4.0 * @group dataframe */ @Experimental def range(start: Long, end: Long): DataFrame = { createDataFrame( sparkContext.range(start, end).map(Row(_)), StructType(StructField("id", LongType, nullable = false) :: Nil)) } /** * :: Experimental :: * Creates a [[DataFrame]] with a single [[LongType]] column named `id`, containing elements * in an range from 0 to `end`(exclusive) with step value 1. * * @since 1.4.0 * @group dataframe */ @Experimental def range(end: Long): DataFrame = range(0, end) /** * :: Experimental :: * Creates a [[DataFrame]] with a single [[LongType]] column named `id`, containing elements * in an range from `start` to `end`(exclusive) with an step value, with partition number * specified. * * @since 1.4.0 * @group dataframe */ @Experimental def range(start: Long, end: Long, step: Long, numPartitions: Int): DataFrame = { createDataFrame( sparkContext.range(start, end, step, numPartitions).map(Row(_)), StructType(StructField("id", LongType, nullable = false) :: Nil)) } /** * Executes a SQL query using Spark, returning the result as a [[DataFrame]]. The dialect that is * used for SQL parsing can be configured with 'spark.sql.dialect'. * * @group basic * @since 1.3.0 */ def sql(sqlText: String): DataFrame = { DataFrame(this, parseSql(sqlText)) } /** * Returns the specified table as a [[DataFrame]]. * * @group ddl_ops * @since 1.3.0 */ def table(tableName: String): DataFrame = DataFrame(this, catalog.lookupRelation(Seq(tableName))) /** * Returns a [[DataFrame]] containing names of existing tables in the current database. * The returned DataFrame has two columns, tableName and isTemporary (a Boolean * indicating if a table is a temporary one or not). * * @group ddl_ops * @since 1.3.0 */ def tables(): DataFrame = { DataFrame(this, ShowTablesCommand(None)) } /** * Returns a [[DataFrame]] containing names of existing tables in the given database. * The returned DataFrame has two columns, tableName and isTemporary (a Boolean * indicating if a table is a temporary one or not). * * @group ddl_ops * @since 1.3.0 */ def tables(databaseName: String): DataFrame = { DataFrame(this, ShowTablesCommand(Some(databaseName))) } /** * Returns the names of tables in the current database as an array. * * @group ddl_ops * @since 1.3.0 */ def tableNames(): Array[String] = { catalog.getTables(None).map { case (tableName, _) => tableName }.toArray } /** * Returns the names of tables in the given database as an array. 
* * @group ddl_ops * @since 1.3.0 */ def tableNames(databaseName: String): Array[String] = { catalog.getTables(Some(databaseName)).map { case (tableName, _) => tableName }.toArray } protected[sql] class SparkPlanner extends SparkStrategies { val sparkContext: SparkContext = self.sparkContext val sqlContext: SQLContext = self def codegenEnabled: Boolean = self.conf.codegenEnabled def unsafeEnabled: Boolean = self.conf.unsafeEnabled def numPartitions: Int = self.conf.numShufflePartitions def strategies: Seq[Strategy] = experimental.extraStrategies ++ ( DataSourceStrategy :: DDLStrategy :: TakeOrdered :: HashAggregation :: LeftSemiJoin :: HashJoin :: InMemoryScans :: ParquetOperations :: BasicOperators :: CartesianProduct :: BroadcastNestedLoopJoin :: Nil) /** * Used to build table scan operators where complex projection and filtering are done using * separate physical operators. This function returns the given scan operator with Project and * Filter nodes added only when needed. For example, a Project operator is only used when the * final desired output requires complex expressions to be evaluated or when columns can be * further eliminated out after filtering has been done. * * The `prunePushedDownFilters` parameter is used to remove those filters that can be optimized * away by the filter pushdown optimization. * * The required attributes for both filtering and expression evaluation are passed to the * provided `scanBuilder` function so that it can avoid unnecessary column materialization. */ def pruneFilterProject( projectList: Seq[NamedExpression], filterPredicates: Seq[Expression], prunePushedDownFilters: Seq[Expression] => Seq[Expression], scanBuilder: Seq[Attribute] => SparkPlan): SparkPlan = { val projectSet = AttributeSet(projectList.flatMap(_.references)) val filterSet = AttributeSet(filterPredicates.flatMap(_.references)) val filterCondition = prunePushedDownFilters(filterPredicates).reduceLeftOption(catalyst.expressions.And) // Right now we still use a projection even if the only evaluation is applying an alias // to a column. Since this is a no-op, it could be avoided. However, using this // optimization with the current implementation would change the output schema. // TODO: Decouple final output schema from expression evaluation so this copy can be // avoided safely. if (AttributeSet(projectList.map(_.toAttribute)) == projectSet && filterSet.subsetOf(projectSet)) { // When it is possible to just use column pruning to get the right projection and // when the columns of this projection are enough to evaluate all filter conditions, // just do a scan followed by a filter, with no extra project. val scan = scanBuilder(projectList.asInstanceOf[Seq[Attribute]]) filterCondition.map(Filter(_, scan)).getOrElse(scan) } else { val scan = scanBuilder((projectSet ++ filterSet).toSeq) Project(projectList, filterCondition.map(Filter(_, scan)).getOrElse(scan)) } } } @transient protected[sql] val planner = new SparkPlanner @transient protected[sql] lazy val emptyResult = sparkContext.parallelize(Seq.empty[Row], 1) /** * Prepares a planned SparkPlan for execution by inserting shuffle operations as needed. 
*/ @transient protected[sql] val prepareForExecution = new RuleExecutor[SparkPlan] { val batches = Batch("Add exchange", Once, EnsureRequirements(self), KickOffBroadcast) :: Nil } protected[sql] def openSession(): SQLSession = { detachSession() val session = createSession() tlSession.set(session) session } protected[sql] def currentSession(): SQLSession = { tlSession.get() } protected[sql] def createSession(): SQLSession = { new this.SQLSession() } protected[sql] def detachSession(): Unit = { tlSession.remove() } protected[sql] class SQLSession { // Note that this is a lazy val so we can override the default value in subclasses. protected[sql] lazy val conf: SQLConf = new SQLConf } /** * :: DeveloperApi :: * The primary workflow for executing relational queries using Spark. Designed to allow easy * access to the intermediate phases of query execution for developers. */ @DeveloperApi protected[sql] class QueryExecution(val logical: LogicalPlan) { def assertAnalyzed(): Unit = analyzer.checkAnalysis(analyzed) lazy val analyzed: LogicalPlan = analyzer.execute(logical) lazy val withCachedData: LogicalPlan = { assertAnalyzed() cacheManager.useCachedData(analyzed) } lazy val optimizedPlan: LogicalPlan = optimizer.execute(withCachedData) // TODO: Don't just pick the first one... lazy val sparkPlan: SparkPlan = { SparkPlan.currentContext.set(self) planner.plan(optimizedPlan).next() } // executedPlan should not be used to initialize any SparkPlan. It should be // only used for execution. lazy val executedPlan: SparkPlan = prepareForExecution.execute(sparkPlan) /** Internal version of the RDD. Avoids copies and has no schema */ lazy val toRdd: RDD[Row] = executedPlan.execute() protected def stringOrError[A](f: => A): String = try f.toString catch { case e: Throwable => e.toString } def simpleString: String = s"""== Physical Plan == |${stringOrError(executedPlan)} """.stripMargin.trim override def toString: String = { def output = analyzed.output.map(o => s"${o.name}: ${o.dataType.simpleString}").mkString(", ") // TODO previously will output RDD details by run (${stringOrError(toRdd.toDebugString)}) // however, the `toRdd` will cause the real execution, which is not what we want. // We need to think about how to avoid the side effect. s"""== Parsed Logical Plan == |${stringOrError(logical)} |== Analyzed Logical Plan == |${stringOrError(output)} |${stringOrError(analyzed)} |== Optimized Logical Plan == |${stringOrError(optimizedPlan)} |== Physical Plan == |${stringOrError(executedPlan)} |Code Generation: ${stringOrError(executedPlan.codegenEnabled)} |== RDD == """.stripMargin.trim } } /** * Parses the data type in our internal string representation. The data type string should * have the same format as the one generated by `toString` in scala. * It is only used by PySpark. */ protected[sql] def parseDataType(dataTypeString: String): DataType = { DataType.fromJson(dataTypeString) } /** * Apply a schema defined by the schemaString to an RDD. It is only used by PySpark. */ protected[sql] def applySchemaToPythonRDD( rdd: RDD[Array[Any]], schemaString: String): DataFrame = { val schema = parseDataType(schemaString).asInstanceOf[StructType] applySchemaToPythonRDD(rdd, schema) } /** * Apply a schema defined by the schema to an RDD. It is only used by PySpark. 
*/ protected[sql] def applySchemaToPythonRDD( rdd: RDD[Array[Any]], schema: StructType): DataFrame = { def needsConversion(dataType: DataType): Boolean = dataType match { case ByteType => true case ShortType => true case LongType => true case FloatType => true case DateType => true case TimestampType => true case StringType => true case ArrayType(_, _) => true case MapType(_, _, _) => true case StructType(_) => true case udt: UserDefinedType[_] => needsConversion(udt.sqlType) case other => false } val convertedRdd = if (schema.fields.exists(f => needsConversion(f.dataType))) { rdd.map(m => m.zip(schema.fields).map { case (value, field) => EvaluatePython.fromJava(value, field.dataType) }) } else { rdd } val rowRdd = convertedRdd.mapPartitions { iter => iter.map { m => new GenericRow(m): Row} } DataFrame(this, LogicalRDD(schema.toAttributes, rowRdd)(self)) } /** * Returns a Catalyst Schema for the given java bean class. */ protected def getSchema(beanClass: Class[_]): Seq[AttributeReference] = { val (dataType, _) = JavaTypeInference.inferDataType(beanClass) dataType.asInstanceOf[StructType].fields.map { f => AttributeReference(f.name, f.dataType, f.nullable)() } } //////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////// // Deprecated methods //////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////// /** * @deprecated As of 1.3.0, replaced by `createDataFrame()`. */ @deprecated("use createDataFrame", "1.3.0") def applySchema(rowRDD: RDD[Row], schema: StructType): DataFrame = { createDataFrame(rowRDD, schema) } /** * @deprecated As of 1.3.0, replaced by `createDataFrame()`. */ @deprecated("use createDataFrame", "1.3.0") def applySchema(rowRDD: JavaRDD[Row], schema: StructType): DataFrame = { createDataFrame(rowRDD, schema) } /** * @deprecated As of 1.3.0, replaced by `createDataFrame()`. */ @deprecated("use createDataFrame", "1.3.0") def applySchema(rdd: RDD[_], beanClass: Class[_]): DataFrame = { createDataFrame(rdd, beanClass) } /** * @deprecated As of 1.3.0, replaced by `createDataFrame()`. */ @deprecated("use createDataFrame", "1.3.0") def applySchema(rdd: JavaRDD[_], beanClass: Class[_]): DataFrame = { createDataFrame(rdd, beanClass) } /** * Loads a Parquet file, returning the result as a [[DataFrame]]. This function returns an empty * [[DataFrame]] if no paths are passed in. * * @group specificdata * @deprecated As of 1.4.0, replaced by `read().parquet()`. */ @deprecated("Use read.parquet()", "1.4.0") @scala.annotation.varargs def parquetFile(paths: String*): DataFrame = { if (paths.isEmpty) { emptyDataFrame } else if (conf.parquetUseDataSourceApi) { read.parquet(paths : _*) } else { DataFrame(this, parquet.ParquetRelation( paths.mkString(","), Some(sparkContext.hadoopConfiguration), this)) } } /** * Loads a JSON file (one object per line), returning the result as a [[DataFrame]]. * It goes through the entire dataset once to determine the schema. * * @group specificdata * @deprecated As of 1.4.0, replaced by `read().json()`. */ @deprecated("Use read.json()", "1.4.0") def jsonFile(path: String): DataFrame = { read.json(path) } /** * Loads a JSON file (one object per line) and applies the given schema, * returning the result as a [[DataFrame]]. * * @group specificdata * @deprecated As of 1.4.0, replaced by `read().json()`. 
*/ @deprecated("Use read.json()", "1.4.0") def jsonFile(path: String, schema: StructType): DataFrame = { read.schema(schema).json(path) } /** * @group specificdata * @deprecated As of 1.4.0, replaced by `read().json()`. */ @deprecated("Use read.json()", "1.4.0") def jsonFile(path: String, samplingRatio: Double): DataFrame = { read.option("samplingRatio", samplingRatio.toString).json(path) } /** * Loads an RDD[String] storing JSON objects (one object per record), returning the result as a * [[DataFrame]]. * It goes through the entire dataset once to determine the schema. * * @group specificdata * @deprecated As of 1.4.0, replaced by `read().json()`. */ @deprecated("Use read.json()", "1.4.0") def jsonRDD(json: RDD[String]): DataFrame = read.json(json) /** * Loads an RDD[String] storing JSON objects (one object per record), returning the result as a * [[DataFrame]]. * It goes through the entire dataset once to determine the schema. * * @group specificdata * @deprecated As of 1.4.0, replaced by `read().json()`. */ @deprecated("Use read.json()", "1.4.0") def jsonRDD(json: JavaRDD[String]): DataFrame = read.json(json) /** * Loads an RDD[String] storing JSON objects (one object per record) and applies the given schema, * returning the result as a [[DataFrame]]. * * @group specificdata * @deprecated As of 1.4.0, replaced by `read().json()`. */ @deprecated("Use read.json()", "1.4.0") def jsonRDD(json: RDD[String], schema: StructType): DataFrame = { read.schema(schema).json(json) } /** * Loads an JavaRDD<String> storing JSON objects (one object per record) and applies the given * schema, returning the result as a [[DataFrame]]. * * @group specificdata * @deprecated As of 1.4.0, replaced by `read().json()`. */ @deprecated("Use read.json()", "1.4.0") def jsonRDD(json: JavaRDD[String], schema: StructType): DataFrame = { read.schema(schema).json(json) } /** * Loads an RDD[String] storing JSON objects (one object per record) inferring the * schema, returning the result as a [[DataFrame]]. * * @group specificdata * @deprecated As of 1.4.0, replaced by `read().json()`. */ @deprecated("Use read.json()", "1.4.0") def jsonRDD(json: RDD[String], samplingRatio: Double): DataFrame = { read.option("samplingRatio", samplingRatio.toString).json(json) } /** * Loads a JavaRDD[String] storing JSON objects (one object per record) inferring the * schema, returning the result as a [[DataFrame]]. * * @group specificdata * @deprecated As of 1.4.0, replaced by `read().json()`. */ @deprecated("Use read.json()", "1.4.0") def jsonRDD(json: JavaRDD[String], samplingRatio: Double): DataFrame = { read.option("samplingRatio", samplingRatio.toString).json(json) } /** * Returns the dataset stored at path as a DataFrame, * using the default data source configured by spark.sql.sources.default. * * @group genericdata * @deprecated As of 1.4.0, replaced by `read().load(path)`. */ @deprecated("Use read.load(path)", "1.4.0") def load(path: String): DataFrame = { read.load(path) } /** * Returns the dataset stored at path as a DataFrame, using the given data source. * * @group genericdata * @deprecated As of 1.4.0, replaced by `read().format(source).load(path)`. */ @deprecated("Use read.format(source).load(path)", "1.4.0") def load(path: String, source: String): DataFrame = { read.format(source).load(path) } /** * (Java-specific) Returns the dataset specified by the given data source and * a set of options as a DataFrame. * * @group genericdata * @deprecated As of 1.4.0, replaced by `read().format(source).options(options).load()`. 
*/
  @deprecated("Use read.format(source).options(options).load()", "1.4.0")
  def load(source: String, options: java.util.Map[String, String]): DataFrame = {
    read.options(options).format(source).load()
  }

  /**
   * (Scala-specific) Returns the dataset specified by the given data source and
   * a set of options as a DataFrame.
   *
   * @group genericdata
   * @deprecated As of 1.4.0, replaced by `read().format(source).options(options).load()`.
   */
  @deprecated("Use read.format(source).options(options).load()", "1.4.0")
  def load(source: String, options: Map[String, String]): DataFrame = {
    read.options(options).format(source).load()
  }

  /**
   * (Java-specific) Returns the dataset specified by the given data source and
   * a set of options as a DataFrame, using the given schema as the schema of the DataFrame.
   *
   * @group genericdata
   * @deprecated As of 1.4.0, replaced by
   *             `read().format(source).schema(schema).options(options).load()`.
   */
  @deprecated("Use read.format(source).schema(schema).options(options).load()", "1.4.0")
  def load(source: String, schema: StructType, options: java.util.Map[String, String]): DataFrame = {
    read.format(source).schema(schema).options(options).load()
  }

  /**
   * (Scala-specific) Returns the dataset specified by the given data source and
   * a set of options as a DataFrame, using the given schema as the schema of the DataFrame.
   *
   * @group genericdata
   * @deprecated As of 1.4.0, replaced by
   *             `read().format(source).schema(schema).options(options).load()`.
   */
  @deprecated("Use read.format(source).schema(schema).options(options).load()", "1.4.0")
  def load(source: String, schema: StructType, options: Map[String, String]): DataFrame = {
    read.format(source).schema(schema).options(options).load()
  }

  /**
   * Construct a [[DataFrame]] representing the database table accessible via JDBC URL
   * url named table.
   *
   * @group specificdata
   * @deprecated As of 1.4.0, replaced by `read().jdbc()`.
   */
  @deprecated("use read.jdbc()", "1.4.0")
  def jdbc(url: String, table: String): DataFrame = {
    read.jdbc(url, table, new Properties)
  }

  /**
   * Construct a [[DataFrame]] representing the database table accessible via JDBC URL
   * url named table. Partitions of the table will be retrieved in parallel based on the parameters
   * passed to this function.
   *
   * @param columnName the name of a column of integral type that will be used for partitioning.
   * @param lowerBound the minimum value of `columnName` used to decide partition stride
   * @param upperBound the maximum value of `columnName` used to decide partition stride
   * @param numPartitions the number of partitions. The range `lowerBound`-`upperBound` will be
   *                      split evenly into this many partitions
   * @group specificdata
   * @deprecated As of 1.4.0, replaced by `read().jdbc()`.
   */
  @deprecated("use read.jdbc()", "1.4.0")
  def jdbc(
      url: String,
      table: String,
      columnName: String,
      lowerBound: Long,
      upperBound: Long,
      numPartitions: Int): DataFrame = {
    read.jdbc(url, table, columnName, lowerBound, upperBound, numPartitions, new Properties)
  }

  /**
   * Construct a [[DataFrame]] representing the database table accessible via JDBC URL
   * url named table. The theParts parameter gives a list of expressions
   * suitable for inclusion in WHERE clauses; each one defines one partition
   * of the [[DataFrame]].
   *
   * @group specificdata
   * @deprecated As of 1.4.0, replaced by `read().jdbc()`.
*/
  @deprecated("use read.jdbc()", "1.4.0")
  def jdbc(url: String, table: String, theParts: Array[String]): DataFrame = {
    read.jdbc(url, table, theParts, new Properties)
  }

  ////////////////////////////////////////////////////////////////////////////
  ////////////////////////////////////////////////////////////////////////////
  // End of deprecated methods
  ////////////////////////////////////////////////////////////////////////////
  ////////////////////////////////////////////////////////////////////////////

  // Register a successfully instantiated context to the singleton. This should be at the end of
  // the class definition so that the singleton is updated only if there is no exception in the
  // construction of the instance.
  SQLContext.setLastInstantiatedContext(self)
}

/**
 * This SQLContext object contains utility functions to create a singleton SQLContext instance,
 * or to get the last created SQLContext instance.
 */
object SQLContext {

  private val INSTANTIATION_LOCK = new Object()

  /**
   * Reference to the last created SQLContext.
   */
  @transient private val lastInstantiatedContext = new AtomicReference[SQLContext]()

  /**
   * Get the singleton SQLContext if it exists or create a new one using the given SparkContext.
   * This function can be used to create a singleton SQLContext object that can be shared across
   * the JVM.
   */
  def getOrCreate(sparkContext: SparkContext): SQLContext = {
    INSTANTIATION_LOCK.synchronized {
      if (lastInstantiatedContext.get() == null) {
        new SQLContext(sparkContext)
      }
    }
    lastInstantiatedContext.get()
  }

  private[sql] def clearLastInstantiatedContext(): Unit = {
    INSTANTIATION_LOCK.synchronized {
      lastInstantiatedContext.set(null)
    }
  }

  private[sql] def setLastInstantiatedContext(sqlContext: SQLContext): Unit = {
    INSTANTIATION_LOCK.synchronized {
      lastInstantiatedContext.set(sqlContext)
    }
  }
}

object KickOffBroadcast extends Rule[SparkPlan] {
  override def apply(plan: SparkPlan): SparkPlan = plan.transformUp {
    case join: BroadcastHashJoin =>
      join.broadcastFuture
      join
  }
}
andrewor14/iolap
sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
Scala
apache-2.0
46,442
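The SQLContext file above pairs the deprecated loaders with their `read` replacements and adds a JVM-wide singleton accessor. A minimal usage sketch, assuming a local master and a made-up JSON path, ties the two together:

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

object SQLContextUsage {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setMaster("local[*]").setAppName("sqlcontext-usage"))
    // getOrCreate reuses the last successfully instantiated context, or builds a new one
    val sqlContext = SQLContext.getOrCreate(sc)
    // read.json is the non-deprecated replacement for jsonFile(path)
    val people = sqlContext.read.json("examples/src/main/resources/people.json")
    people.show()
    sc.stop()
  }
}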
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.hive.execution import org.apache.spark.sql.QueryTest import org.apache.spark.sql.hive.test.TestHive class HivePlanTest extends QueryTest { import TestHive._ test("udf constant folding") { val optimized = sql("SELECT cos(null) FROM src").queryExecution.optimizedPlan val correctAnswer = sql("SELECT cast(null as double) FROM src").queryExecution.optimizedPlan comparePlans(optimized, correctAnswer) } }
hengyicai/OnlineAggregationUCAS
sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HivePlanTest.scala
Scala
apache-2.0
1,255
package com.conekta import play.api.libs.json._ import play.api.libs.functional.syntax._ case class Charge( id: String, createdAt: Int, status: String, currency: String, description: String, referenceId: Option[String], failureCode: Option[String], failureMessage: Option[String], amount: Int, paymentMethod: PaymentMethod, details: Details, fee: Int, monthlyInstallments: Option[Int], refunds: List[Refund]) extends Resource { def refund(amount: Int = 0): Charge = { val paramOrDefault = amount match { case value if value > 0 => Map("amount" -> value) case _ => Map.empty[String, Int] } request("POST", "%s/refund".format(instanceURL(this.id)), paramOrDefault).as[Charge] } def capture(): Charge = { request("POST", "%s/capture".format(instanceURL(this.id)), Map.empty).as[Charge] } } object Charge extends Resource { implicit val chargeReads: Reads[Charge] = ( (__ \\ "id").read[String] and (__ \\ "created_at").read[Int] and (__ \\ "status").read[String] and (__ \\ "currency").read[String] and (__ \\ "description").read[String] and (__ \\ "reference_id").readNullable[String] and (__ \\ "failure_code").readNullable[String] and (__ \\ "failure_message").readNullable[String] and (__ \\ "amount").read[Int] and (__ \\ "payment_method").read[PaymentMethod] and (__ \\ "details").read[Details] and (__ \\ "fee").read[Int] and (__ \\ "monthly_installments").readNullable[Int] and (__ \\ "refunds").read[List[Refund]])(Charge.apply _) def create(params: Map[String, _]): Charge = request("POST", classURL, params).as[Charge] def where(params: Map[String, _]): List[Charge] = request("GET", classURL, params).as[List[Charge]] def find(id: String): Charge = request("GET", instanceURL(id)).as[Charge] def all(): List[Charge] = request("GET", classURL).as[List[Charge]] }
Wirwing/conekta-scala
src/main/scala/com/conekta/Charge.scala
Scala
apache-2.0
1,903
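A hedged usage sketch for the Charge resource above; the parameter keys and the test card token mirror Conekta's charge API and should be read as assumptions rather than a verified call:

import com.conekta.Charge

object ChargeUsage {
  def main(args: Array[String]): Unit = {
    // create a charge, then issue a partial refund through the instance method defined above
    val charge = Charge.create(Map(
      "description" -> "Illustrative charge",
      "amount"      -> 2000,
      "currency"    -> "MXN",
      "card"        -> "tok_test_visa_4242"))
    val refunded = charge.refund(500) // POSTs to <instanceURL>/refund with an explicit amount
    println(s"charge ${refunded.id} is now ${refunded.status}")
  }
}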
package uk.gov.dvla.vehicles.dispose.gatling import io.gatling.core.Predef._ import scala.language.postfixOps import scala.concurrent.duration._ import Chains._ object Scenarios { val dispose_start_to_finish = scenario("Single vehicle disposal from start to finish") .exec(chain_setup_trader_details) .exec(chain_dispose_vehicle) val dispose_start_to_finish_exit_on_fail = scenario("Single vehicle disposal from start to finish exit on fail") .exec(exitBlockOnFail(chain_setup_trader_details)) .exec(exitBlockOnFail(chain_dispose_vehicle)) val dispose_ten_vehicles_using_caching = scenario("Ten vehicle disposals using caching feature") .exec(chain_setup_trader_details) .exec(chain_dispose_vehicle) .repeat(10) { exec(chain_new_dispose) .exec(chain_dispose_vehicle) } val dispose_vehicles_using_caching_over_10_min = scenario("Multiple vehicle disposals using caching feature over ~10 min period") .exec(chain_setup_trader_details) .exec(chain_dispose_vehicle) .during(10 minutes) { exec(chain_new_dispose) .exec(chain_dispose_vehicle) } }
dvla/vehicles-online
gatling-tests/src/test/scala/uk/gov/dvla/vehicles/dispose/gatling/Scenarios.scala
Scala
mit
1,175
object Literals { //unicode escapes don't get expanded in comments def comment = "comment" //\\u000A is the bomb //unicode escapes work in string def inString = "\\u000A" def inTripleQuoted = """\\u000A""" def inRawInterpolation = raw"\\u000A" def inRawTripleQuoted = raw"""\\u000A""" def inChar = '\\u000A' def `in backtick quoted\\u0020identifier` = "bueno" //unicode escapes preceded by an odd number of backslash characters //are not processed iff the backslashes themselves are escaped def after2slashestriple = """\\\\u0040""" def after2slashesplain = "\\\\u0040" def after2slashesraw = raw"\\\\u0040" def after2slashess = s"\\\\u0040" def firstFailure = ("\\""+"""([^"\\x00-\\x1F\\x7F\\\\]|\\\\[\\\\'"bfnrt]|\\\\u[a-fA-F0-9]{4})*"""+"\\"") def badString = """bad\\""" def escapedQuotesInInterpolation = s"\\u0022_\\u0022" def escapedQuotesInSingleQuotedString = "\\u0022" def escapedQuotesInCharLit = '\\u0027' def processed = List( "literal tab in single quoted string" -> "tab tab", "tab escape char in single quoted string" -> "tab\\ttab", "tab unicode escape in single quoted string" -> "tab\\u0009tab", "literal tab in triple quoted string" -> """tab tab""", "literal tab in triple quoted raw interpolator" -> raw"""tab tab""", "literal tab in single quoted raw interpolator" -> raw"tab tab", "literal tab in triple quoted s interpolator" -> s"""tab tab""", "literal tab in single quoted s interpolator" -> s"tab tab", "tab escape char in triple quoted s interpolator" -> s"""tab\\ttab""", "tab escape char in single quoted s interpolator" -> s"""tab\\ttab""", "tab unicode escape in triple quoted s interpolator" -> s"""tab\\u0009tab""", "tab unicode escape in single quoted s interpolator" -> s"tab\\u0009tab" ) } object Test { def main(args: Array[String]): Unit = { val bueono = Literals.`in backtick quoted identifier` def printways(ways: List[(String, String)]) = ways.map(_._1).sorted.mkString(", ") def printSegment(l: List[(String, String)]) = l.groupBy(_._2).toList.foreach{ case (result, ways) => { println(s"literals that result in $result:") ways.foreach{case (x, _) => println(x)} println() } } print("processed...") for { case (description, format) <- Literals.processed } { assert(format == "tab\\ttab", description) } println("OK") print("unprocessed...") assert("""t\\tt""".toList == List('t', '\\\\', 't', 't'), "tab escape char in triple quoted string") assert("""tab\\ttab""" == raw"tab\\ttab", "tab escape char in raw interpolator") assert("""tab\\ttab""" == raw"""tab\\ttab""", "tab escape char in raw triple quoted interpolator") println("OK") println("after backslashes") println(Literals.after2slashestriple.toList) println(Literals.after2slashesplain.toList) println(Literals.after2slashesraw.toList) println(Literals.after2slashess.toList) println(Literals.firstFailure.toList) println(Literals.badString.toList) val asList = List('\\\\', 'u', '0', '0', '0', 'A') assert(asList == Literals.inTripleQuoted.toList) assert(asList == Literals.inRawInterpolation.toList) assert(asList == Literals.inRawTripleQuoted.toList) } }
dotty-staging/dotty
tests/run/t3220-3.scala
Scala
apache-2.0
3,285
/*********************************************************************** * Copyright (c) 2013-2019 Commonwealth Computer Research, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Apache License, Version 2.0 * which accompanies this distribution and is available at * http://www.opensource.org/licenses/apache2.0.php. ***********************************************************************/ package org.locationtech.geomesa.index.index.z3 import java.util.Date import com.typesafe.scalalogging.LazyLogging import org.geotools.util.factory.Hints import org.locationtech.geomesa.curve.BinnedTime.TimeToBinnedTime import org.locationtech.geomesa.curve.{BinnedTime, XZ3SFC} import org.locationtech.geomesa.filter.FilterValues import org.locationtech.geomesa.index.api.IndexKeySpace.IndexKeySpaceFactory import org.locationtech.geomesa.index.api.ShardStrategy.{NoShardStrategy, ZShardStrategy} import org.locationtech.geomesa.index.api._ import org.locationtech.geomesa.index.conf.QueryProperties import org.locationtech.geomesa.index.geotools.GeoMesaDataStoreFactory.GeoMesaDataStoreConfig import org.locationtech.geomesa.index.utils.Explainer import org.locationtech.geomesa.utils.geotools.{GeometryUtils, WholeWorldPolygon} import org.locationtech.geomesa.utils.index.ByteArrays import org.locationtech.jts.geom.{Geometry, Point} import org.locationtech.sfcurve.IndexRange import org.opengis.feature.simple.SimpleFeatureType import org.opengis.filter.Filter import scala.util.control.NonFatal class XZ3IndexKeySpace(val sft: SimpleFeatureType, val sharding: ShardStrategy, geomField: String, dtgField: String) extends IndexKeySpace[XZ3IndexValues, Z3IndexKey] with LazyLogging { import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType require(classOf[Geometry].isAssignableFrom(sft.getDescriptor(geomField).getType.getBinding), s"Expected field $geomField to have a geometry binding, but instead it has: " + sft.getDescriptor(geomField).getType.getBinding.getSimpleName) require(classOf[Date].isAssignableFrom(sft.getDescriptor(dtgField).getType.getBinding), s"Expected field $dtgField to have a date binding, but instead it has: " + sft.getDescriptor(dtgField).getType.getBinding.getSimpleName) protected val geomIndex: Int = sft.indexOf(geomField) protected val dtgIndex: Int = sft.indexOf(dtgField) protected val sfc = XZ3SFC(sft.getXZPrecision, sft.getZ3Interval) protected val timeToIndex: TimeToBinnedTime = BinnedTime.timeToBinnedTime(sft.getZ3Interval) private val dateToIndex = BinnedTime.dateToBinnedTime(sft.getZ3Interval) private val boundsToDates = BinnedTime.boundsToIndexableDates(sft.getZ3Interval) private val isPoints = classOf[Point].isAssignableFrom(sft.getDescriptor(geomIndex).getType.getBinding) override val attributes: Seq[String] = Seq(geomField, dtgField) override val indexKeyByteLength: Right[(Array[Byte], Int, Int) => Int, Int] = Right(10 + sharding.length) override val sharing: Array[Byte] = Array.empty override def toIndexKey(writable: WritableFeature, tier: Array[Byte], id: Array[Byte], lenient: Boolean): RowKeyValue[Z3IndexKey] = { val geom = writable.getAttribute[Geometry](geomIndex) if (geom == null) { throw new IllegalArgumentException(s"Null geometry in feature ${writable.feature.getID}") } val envelope = geom.getEnvelopeInternal // TODO support date intervals (remember to remove disjoint data check in getRanges) val dtg = writable.getAttribute[Date](dtgIndex) val time = if (dtg == null) { 0L } else { 
dtg.getTime } val BinnedTime(b, t) = timeToIndex(time) val xz = try { sfc.index(envelope.getMinX, envelope.getMinY, t, envelope.getMaxX, envelope.getMaxY, t, lenient) } catch { case NonFatal(e) => throw new IllegalArgumentException(s"Invalid xz value from geometry/time: $geom,$dtg", e) } val shard = sharding(writable) // create the byte array - allocate a single array up front to contain everything // ignore tier, not used here val bytes = Array.ofDim[Byte](shard.length + 10 + id.length) if (shard.isEmpty) { ByteArrays.writeShort(b, bytes, 0) ByteArrays.writeLong(xz, bytes, 2) System.arraycopy(id, 0, bytes, 10, id.length) } else { bytes(0) = shard.head // shard is only a single byte ByteArrays.writeShort(b, bytes, 1) ByteArrays.writeLong(xz, bytes, 3) System.arraycopy(id, 0, bytes, 11, id.length) } SingleRowKeyValue(bytes, sharing, shard, Z3IndexKey(b, xz), tier, id, writable.values) } override def getIndexValues(filter: Filter, explain: Explainer): XZ3IndexValues = { import org.locationtech.geomesa.filter.FilterHelper._ // standardize the two key query arguments: polygon and date-range val geometries: FilterValues[Geometry] = { val extracted = extractGeometries(filter, geomField, isPoints) if (extracted.nonEmpty) { extracted } else { FilterValues(Seq(WholeWorldPolygon)) } } // since we don't apply a temporal filter, we pass handleExclusiveBounds to // make sure we exclude the non-inclusive endpoints of a during filter. // note that this isn't completely accurate, as we only index down to the second val intervals = extractIntervals(filter, dtgField, handleExclusiveBounds = true) explain(s"Geometries: $geometries") explain(s"Intervals: $intervals") // disjoint geometries are ok since they could still intersect a polygon if (intervals.disjoint) { explain("Disjoint dates extracted, short-circuiting to empty query") return XZ3IndexValues(sfc, FilterValues.empty, Seq.empty, FilterValues.empty, Map.empty, Seq.empty) } // compute our ranges based on the coarse bounds for our query val xy: Seq[(Double, Double, Double, Double)] = { val multiplier = QueryProperties.PolygonDecompMultiplier.toInt.get val bits = QueryProperties.PolygonDecompBits.toInt.get geometries.values.flatMap(GeometryUtils.bounds(_, multiplier, bits)) } // calculate map of weeks to time intervals in that week val timesByBin = scala.collection.mutable.Map.empty[Short, (Double, Double)] val unboundedBins = Seq.newBuilder[(Short, Short)] def updateTime(bin: Short, lt: Double, ut: Double): Unit = { val times = timesByBin.get(bin) match { case None => (lt, ut) case Some((min, max)) => (math.min(min, lt), math.max(max, ut)) } timesByBin(bin) = times } // note: intervals shouldn't have any overlaps intervals.foreach { interval => val (lower, upper) = boundsToDates(interval.bounds) val BinnedTime(lb, lt) = dateToIndex(lower) val BinnedTime(ub, ut) = dateToIndex(upper) if (interval.isBoundedBothSides) { if (lb == ub) { updateTime(lb, lt, ut) } else { updateTime(lb, lt, sfc.zBounds._2) updateTime(ub, sfc.zBounds._1, ut) Range.inclusive(lb + 1, ub - 1).foreach(b => timesByBin(b.toShort) = sfc.zBounds) } } else if (interval.lower.value.isDefined) { updateTime(lb, lt, sfc.zBounds._2) unboundedBins += (((lb + 1).toShort, Short.MaxValue)) } else if (interval.upper.value.isDefined) { updateTime(ub, sfc.zBounds._1, ut) unboundedBins += ((0, (ub - 1).toShort)) } } // make our underlying index values available to other classes in the pipeline for processing XZ3IndexValues(sfc, geometries, xy, intervals, timesByBin.toMap, unboundedBins.result()) } 
override def getRanges(values: XZ3IndexValues, multiplier: Int): Iterator[ScanRange[Z3IndexKey]] = { val XZ3IndexValues(sfc, _, xy, _, timesByBin, unboundedBins) = values // note: `target` will always be Some, as ScanRangesTarget has a default value val target = QueryProperties.ScanRangesTarget.option.map { t => math.max(1, if (timesByBin.isEmpty) { t.toInt } else { t.toInt / timesByBin.size } / multiplier) } def toZRanges(t: (Double, Double)): Seq[IndexRange] = sfc.ranges(xy.map { case (xmin, ymin, xmax, ymax) => (xmin, ymin, t._1, xmax, ymax, t._2) }, target) lazy val wholePeriodRanges = toZRanges(sfc.zBounds) val bounded = timesByBin.iterator.flatMap { case (bin, times) => val zs = if (times.eq(sfc.zBounds)) { wholePeriodRanges } else { toZRanges(times) } zs.map(r => BoundedRange(Z3IndexKey(bin, r.lower), Z3IndexKey(bin, r.upper))) } val unbounded = unboundedBins.iterator.map { case (lower, Short.MaxValue) => LowerBoundedRange(Z3IndexKey(lower, 0L)) case (0, upper) => UpperBoundedRange(Z3IndexKey(upper, Long.MaxValue)) case (lower, upper) => logger.error(s"Unexpected unbounded bin endpoints: $lower:$upper") UnboundedRange(Z3IndexKey(0, 0L)) } bounded ++ unbounded } override def getRangeBytes(ranges: Iterator[ScanRange[Z3IndexKey]], tier: Boolean): Iterator[ByteRange] = { if (sharding.length == 0) { ranges.map { case BoundedRange(lo, hi) => BoundedByteRange(ByteArrays.toBytes(lo.bin, lo.z), ByteArrays.toBytesFollowingPrefix(hi.bin, hi.z)) case LowerBoundedRange(lo) => BoundedByteRange(ByteArrays.toBytes(lo.bin, lo.z), ByteRange.UnboundedUpperRange) case UpperBoundedRange(hi) => BoundedByteRange(ByteRange.UnboundedLowerRange, ByteArrays.toBytesFollowingPrefix(hi.bin, hi.z)) case UnboundedRange(_) => BoundedByteRange(ByteRange.UnboundedLowerRange, ByteRange.UnboundedUpperRange) case r => throw new IllegalArgumentException(s"Unexpected range type $r") } } else { ranges.flatMap { case BoundedRange(lo, hi) => val lower = ByteArrays.toBytes(lo.bin, lo.z) val upper = ByteArrays.toBytesFollowingPrefix(hi.bin, hi.z) sharding.shards.map(p => BoundedByteRange(ByteArrays.concat(p, lower), ByteArrays.concat(p, upper))) case LowerBoundedRange(lo) => val lower = ByteArrays.toBytes(lo.bin, lo.z) val upper = ByteRange.UnboundedUpperRange sharding.shards.map(p => BoundedByteRange(ByteArrays.concat(p, lower), ByteArrays.concat(p, upper))) case UpperBoundedRange(hi) => val lower = ByteRange.UnboundedLowerRange val upper = ByteArrays.toBytesFollowingPrefix(hi.bin, hi.z) sharding.shards.map(p => BoundedByteRange(ByteArrays.concat(p, lower), ByteArrays.concat(p, upper))) case UnboundedRange(_) => Seq(BoundedByteRange(ByteRange.UnboundedLowerRange, ByteRange.UnboundedUpperRange)) case r => throw new IllegalArgumentException(s"Unexpected range type $r") } } } // always apply the full filter to xz queries override def useFullFilter(values: Option[XZ3IndexValues], config: Option[GeoMesaDataStoreConfig], hints: Hints): Boolean = true } object XZ3IndexKeySpace extends IndexKeySpaceFactory[XZ3IndexValues, Z3IndexKey] { override def supports(sft: SimpleFeatureType, attributes: Seq[String]): Boolean = attributes.lengthCompare(2) == 0 && attributes.forall(sft.indexOf(_) != -1) && classOf[Geometry].isAssignableFrom(sft.getDescriptor(attributes.head).getType.getBinding) && classOf[Date].isAssignableFrom(sft.getDescriptor(attributes.last).getType.getBinding) override def apply(sft: SimpleFeatureType, attributes: Seq[String], tier: Boolean): XZ3IndexKeySpace = { val shards = if (tier) { NoShardStrategy } else { 
ZShardStrategy(sft) } new XZ3IndexKeySpace(sft, shards, attributes.head, attributes.last) } }
elahrvivaz/geomesa
geomesa-index-api/src/main/scala/org/locationtech/geomesa/index/index/z3/XZ3IndexKeySpace.scala
Scala
apache-2.0
11,885
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. package com.azure.cosmos.spark trait AccountDataResolver { def getAccountDataConfig(configs : Map[String, String]): Map[String, String] }
Azure/azure-sdk-for-java
sdk/cosmos/azure-cosmos-spark_3_2-12/src/main/scala/com/azure/cosmos/spark/AccountDataResolver.scala
Scala
mit
240
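Because the trait above is a single hook, a minimal sketch of an implementation may help; the `spark.cosmos.accountKey` option name and the environment variable are assumptions for illustration:

import com.azure.cosmos.spark.AccountDataResolver

// Resolves the account key at runtime instead of hard-coding it in the Spark options.
class EnvAccountDataResolver extends AccountDataResolver {
  override def getAccountDataConfig(configs: Map[String, String]): Map[String, String] =
    // pass the incoming options through, adding a key looked up from the environment (assumed key name)
    configs + ("spark.cosmos.accountKey" -> sys.env.getOrElse("COSMOS_ACCOUNT_KEY", ""))
}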
/* Copyright 2009-2016 EPFL, Lausanne */ object Sequencing6 { def f(x1: Int, x2: Int, x3: Int): Int = { require(x1 == 6 && x2 == 12 && x3 == 11) x3 } def test(): Int = { var x = 5 f( {x = x + 1; x}, {x = x * 2; x}, {x = x - 1; x} ) x } ensuring(res => res == 11) }
epfl-lara/leon
src/test/resources/regression/verification/xlang/valid/Sequencing6.scala
Scala
gpl-3.0
322
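The postcondition in Sequencing6 relies on Scala evaluating call arguments left to right before the body runs. A plain-Scala illustration of the same point:

// Arguments are evaluated left to right, so the three mutations run in sequence.
object EvaluationOrder {
  def third(a: Int, b: Int, c: Int): Int = c

  def main(args: Array[String]): Unit = {
    var x = 5
    val r = third({ x += 1; x }, { x *= 2; x }, { x -= 1; x })
    println((x, r)) // prints (11,11): x goes 5 -> 6 -> 12 -> 11, and the third argument is returned
  }
}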
/* * Copyright (c) 2016-2018 Snowplow Analytics Ltd. All rights reserved. * * This program is licensed to you under the Apache License Version 2.0, * and you may not use this file except in compliance with the Apache License Version 2.0. * You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, * software distributed under the Apache License Version 2.0 is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. */ package com.snowplowanalytics.iglu.schemaddl.jsonschema import cats.Show import cats.data._ import cats.implicits._ import scala.reflect.runtime.{universe => ru} // This library import Linter._ import properties.{ArrayProperty, NumberProperty, ObjectProperty, StringProperty} import properties.CommonProperties._ import properties.ObjectProperty._ import properties.StringProperty._ /** Schema validation logic */ sealed trait Linter extends Product with Serializable { def apply(jsonPointer: JsonPointer, schema: Schema): Validated[Issue, Unit] def getName: String = toString def level: Level } object Linter { sealed trait Level extends Product with Serializable object Level { case object Info extends Level case object Warning extends Level case object Error extends Level } sealed trait Issue extends Product with Serializable { /** Linter revealed this issue */ def linter: Linter /** Transform into human-readable message */ def show: String /** Name of the linter revealed this issues */ def getLinterName: String = linter.getName /** To linter-agnostic message */ def toMessage(pointer: JsonPointer): Message = Message(pointer, show, linter.level) } object Issue { implicit val issueShow: Show[Issue] = Show.show((i: Issue) => i.show) } /** Linter-agnostic message */ final case class Message(jsonPointer: JsonPointer, message: String, level: Linter.Level) def allLintersMap: Map[String, Linter] = sealedDescendants[Linter].map(x => (x.getName, x)).toMap final case object rootObject extends Linter { self => val level: Level = Level.Warning case object Details extends Issue { val linter = self def show: String = "At the root level, the schema should have a \"type\" property set to \"object\" and have a \"properties\" property" } def apply(jsonPointer: JsonPointer, schema: Schema): Validated[Issue, Unit] = if (jsonPointer == JsonPointer.Root && (schema.properties.isEmpty || !schema.`type`.contains(Type.Object))) Details.invalid else noIssues } final case object numericMinimumMaximum extends Linter { self => val level: Level = Level.Error case class Details(min: BigDecimal, max: BigDecimal) extends Issue { val linter = self def show: String = s"A field with numeric type has a minimum value [$min] greater than the maximum value [$max]" } def apply(jsonPointer: JsonPointer, schema: Schema): Validated[Details, Unit] = (schema.minimum, schema.maximum) match { case (Some(min), Some(max)) => (max.getAsDecimal >= min.getAsDecimal).or(Details(min.getAsDecimal, max.getAsDecimal)) case _ => noIssues } } final case object stringMinMaxLength extends Linter { self => val level: Level = Level.Error case class Details(min: BigInt, max: BigInt) extends Issue { val linter = self def show: String = s"""A string type with "minLength" and "maxLength" property values has a minimum value [$min] higher than the maximum [$max]""" } def 
apply(jsonPointer: JsonPointer, schema: Schema): Validated[Details, Unit] = (schema.minLength, schema.maxLength) match { case (Some(min), Some(max)) => (max.value >= min.value).or(Details(min.value, max.value)) case _ => noIssues } } final case object stringMaxLengthRange extends Linter { self => val level: Level = Level.Warning case class Details(maximum: BigInt) extends Issue { val linter = self def show: String = s"""A string property has a "maxLength" [$maximum] greater than the Redshift VARCHAR maximum of 65535""" } def apply(jsonPointer: JsonPointer, schema: Schema): Validated[Details, Unit] = if (schema.withType(Type.String)) { schema.maxLength match { case Some(max) if max.value > 65535 => Details(max.value).invalid case _ => noIssues } } else noIssues } final case object arrayMinMaxItems extends Linter { self => val level: Level = Level.Error case class Details(minimum: BigInt, maximum: BigInt) extends Issue { val linter = self def show: String = s"""A field of array type has a "minItems" value [$minimum] with a greater value than the "maxItems" [$maximum]""" } def apply(jsonPointer: JsonPointer, schema: Schema): Validated[Details, Unit] = (schema.minItems, schema.maxItems) match { case (Some(min), Some(max)) => (max.value >= min.value).or(Details(min.value, max.value)) case _ => noIssues } } final case object numericProperties extends Linter { self => val level: Level = Level.Error case class Details(keys: List[String]) extends Issue { val linter = self def show: String = s"Numeric properties [${keys.mkString(",")}] require either a number, integer or absent values" } def apply(jsonPointer: JsonPointer, schema: Schema): Validated[Details, Unit] = { val numberProperties = schema.allProperties.collect { case Some(p: NumberProperty) => p } val fruitless = numberProperties.nonEmpty && (schema.withoutType(Type.Number) && schema.withoutType(Type.Integer)) (!fruitless).or(Details(numberProperties.map(_.keyName))) } } final case object stringProperties extends Linter { self => val level: Level = Level.Error case class Details(keys: List[String]) extends Issue { val linter = self def show: String = s"String properties [${keys.mkString(",")}] require either string or absent values" } def apply(jsonPointer: JsonPointer, schema: Schema): Validated[Details, Unit] = { val stringProperties = schema.allProperties.collect { case Some(p: StringProperty) => p } val fruitless = stringProperties.nonEmpty && schema.withoutType(Type.String) (!fruitless).or(Details(stringProperties.map(_.keyName))) } } final case object arrayProperties extends Linter { self => val level: Level = Level.Error case class Details(keys: Set[String]) extends Issue { val linter = self def show: String = s"Array properties [${keys.mkString(",")}] require either array or absent values" } def apply(jsonPointer: JsonPointer, schema: Schema): Validated[Details, Unit] = { val arrayProperties = schema.allProperties.collect { case Some(p: ArrayProperty) => p } val fruitless = arrayProperties.nonEmpty && schema.withoutType(Type.Array) (!fruitless).or(Details(arrayProperties.map(_.keyName).toSet)) } } final case object objectProperties extends Linter { self => val level: Level = Level.Error case class Details(keys: Set[String]) extends Issue { val linter = self def show: String = s"Object properties [${keys.mkString(",")}] require either object or absent values" } def apply(jsonPointer: JsonPointer, schema: Schema): Validated[Issue, Unit] = { val objectProperties = schema.allProperties.collect { case Some(p: ObjectProperty) => p } val 
fruitless = objectProperties.map(_.keyName).nonEmpty && schema.withoutType(Type.Object) (!fruitless).or(Details(objectProperties.map(_.keyName).toSet)) } } final case object requiredPropertiesExist extends Linter { self => val level: Level = Level.Error case class Details(keys: Set[String]) extends Issue { val linter = self def show: String = s"Elements specified as required [${keys.mkString(",")}] don't exist in schema properties" } def apply(jsonPointer: JsonPointer, schema: Schema): Validated[Details, Unit] = (schema.additionalProperties, schema.required, schema.properties, schema.patternProperties) match { case (Some(AdditionalProperties.AdditionalPropertiesAllowed(false)), Some(Required(required)), Some(Properties(properties)), None) => val allowedKeys = properties.keySet val requiredKeys = required.toSet val diff = requiredKeys -- allowedKeys diff.isEmpty.or(Details(diff)) case _ => noIssues } } final case object unknownFormats extends Linter { self => val level: Level = Level.Warning case class Details(name: String) extends Issue { val linter = self def show: String = s"Unknown format [$name] detected. Known formats are: date-time, date, email, hostname, ipv4, ipv6 or uri" } def apply(jsonPointer: JsonPointer, schema: Schema): Validated[Details, Unit] = schema.format match { case Some(Format.CustomFormat(format)) => Details(format).invalid case _ => noIssues } } final case object numericMinMax extends Linter { self => val level: Level = Level.Warning case object Details extends Issue { val linter = self def show: String = "A numeric property should have \"minimum\" and \"maximum\" properties" } def apply(jsonPointer: JsonPointer, schema: Schema): Validated[Issue, Unit] = if (schema.withType(Type.Number) || schema.withType(Type.Integer)) { (schema.minimum, schema.maximum) match { case (Some(_), Some(_)) => noIssues case _ => Details.invalid } } else noIssues } final case object stringLength extends Linter { self => val level: Level = Level.Warning case object Details extends Issue { val linter = self def show: String = "A string type in the schema doesn't contain \"maxLength\" or format which is required" } def apply(jsonPointer: JsonPointer, schema: Schema): Validated[Issue, Unit] = if (schema.withType(Type.String) && schema.enum.isEmpty && schema.maxLength.isEmpty) { schema.format match { case Some(Format.CustomFormat(_)) => Details.invalid case None => Details.invalid case Some(_) => noIssues } } else { noIssues } } final case object optionalNull extends Linter { self => val level: Level = Level.Info case class Details(keys: Set[String]) extends Issue { val linter = self def show: String = s"""Use "type: null" to indicate a field as optional for properties ${keys.mkString(",")}""" } def apply(jsonPointer: JsonPointer, schema: Schema): Validated[Issue, Unit] = (schema.required, schema.properties) match { case (Some(Required(required)), Some(Properties(properties))) => val allowedKeys = properties.keySet val requiredKeys = required.toSet val optionalKeys = allowedKeys -- requiredKeys val optKeysWithoutTypeNull = for { key <- optionalKeys if !properties(key).withType(Type.Null) } yield key optKeysWithoutTypeNull.isEmpty.or(Details(optKeysWithoutTypeNull)) case _ => noIssues } } final case object description extends Linter { self => val level: Level = Level.Info case object Details extends Issue { val linter = self def show: String = "The schema is missing the \"description\" property" } def apply(jsonPointer: JsonPointer, schema: Schema): Validated[Issue, Unit] = schema.description 
match { case Some(_) => noIssues case None => Details.invalid } } private val m = ru.runtimeMirror(getClass.getClassLoader) /** * Reflection method to get runtime object by compiler's `Symbol` * @param desc compiler runtime `Symbol` * @return "real" scala case object */ private def getCaseObject(desc: ru.Symbol): Any = { val mod = m.staticModule(desc.asClass.fullName) m.reflectModule(mod).instance } /** * Get all objects extending some sealed hierarchy * @tparam Root some sealed trait with object descendants * @return whole set of objects */ def sealedDescendants[Root: ru.TypeTag]: Set[Root] = { val symbol = ru.typeOf[Root].typeSymbol val internal = symbol.asInstanceOf[scala.reflect.internal.Symbols#Symbol] val descendants = if (internal.isSealed) Some(internal.sealedDescendants.map(_.asInstanceOf[ru.Symbol]) - symbol) else None descendants.getOrElse(Set.empty).map(x => getCaseObject(x).asInstanceOf[Root]) } /** * Pimp boolean, so it can pipe failure in case of `false` */ private implicit class LintOps(val value: Boolean) extends AnyVal { def or[A](message: A): Validated[A, Unit] = if (value) ().valid[A] else message.invalid } /** * Pimp JSON Schema AST with method checking presence of some JSON type */ private[schemaddl] implicit class SchemaOps(val value: Schema) extends AnyVal { /** Check if Schema has no specific type *OR* has no type at all */ def withoutType(jsonType: Type): Boolean = value.`type` match { case Some(Type.Union(types)) => !types.contains(jsonType) case Some(t) => t != jsonType case None => false // absent type is ok } /** Check if Schema has no specific type *OR* has no type at all */ def withType(jsonType: Type): Boolean = value.`type` match { case Some(Type.Union(types)) => types.contains(jsonType) case Some(t) => t == jsonType case None => false // absent type is ok } /** Check if Schema has specified format */ def withFormat(format: Format): Boolean = value.format match { case Some(f) => format == f case None => false } } private def noIssues = ().valid[Nothing] }
snowplow/iglu
0-common/schema-ddl/src/main/scala/com.snowplowanalytics/iglu.schemaddl/jsonschema/Linter.scala
Scala
apache-2.0
14,389
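A small sketch of how the reflection-built registry in Linter can be enumerated; it only prints linter names and severity levels, so no Schema value has to be constructed:

import com.snowplowanalytics.iglu.schemaddl.jsonschema.Linter

object ListLinters {
  def main(args: Array[String]): Unit =
    // allLintersMap is keyed by each case object's name, e.g. "stringLength" -> stringLength
    Linter.allLintersMap.toList.sortBy(_._1).foreach { case (name, linter) =>
      println(f"$name%-30s ${linter.level}")
    }
}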
package scala package reflect package macros /** * <span class="badge badge-red" style="float: right;">EXPERIMENTAL</span> * * Attachments provide a way to associate custom metadata with symbols and trees. * * Along with `symbol` and `tpe`, which represent core metadata of trees, each tree * carries the `attachments` field that can store other metadata: compiler-defined (e.g. positions) or user-defined. * Same story is true for symbols, which also have extensible metadata by the virtue * of the same `attachments` field. * * Typically attachments just store a [[scala.reflect.api.Position]], but they can be extended to * encompass arbitrary payloads. Payloads are stored in type-indexed slots, which can be read with `get[T]` and written * with `update[T]` and `remove[T]`. * * This API doesn't have much use in the runtime reflection API (the [[scala.reflect.api]] package), but it might be of help * for macro writers, providing a way to coordinate multiple macros operating on the same code. Therefore the `attachments` * field is only declared in trees and symbols belonging to [[scala.reflect.macros.Universe]]. */ abstract class Attachments { self => /** The position type of this attachment */ type Pos >: Null /** The underlying position */ def pos: Pos /** Creates a copy of this attachment with the position replaced by `newPos` */ def withPos(newPos: Pos): Attachments { type Pos = self.Pos } /** The underlying payload with the guarantee that no two elements have the same type. */ def all: Set[Any] = Set.empty private def matchesTag[T: ClassTag](datum: Any) = classTag[T].runtimeClass.isInstance(datum) /** An underlying payload of the given class type `T`. */ def get[T: ClassTag]: Option[T] = (all find matchesTag[T]).asInstanceOf[Option[T]] /** Check underlying payload contains an instance of type `T`. */ def contains[T: ClassTag]: Boolean = !isEmpty && (all exists matchesTag[T]) /** Creates a copy of this attachment with the payload slot of T added/updated with the provided value. * Replaces an existing payload of the same type, if exists. */ def update[T: ClassTag](attachment: T): Attachments { type Pos = self.Pos } = new NonemptyAttachments[Pos](this.pos, remove[T].all + attachment) /** Creates a copy of this attachment with the payload of the given class type `T` removed. */ def remove[T: ClassTag]: Attachments { type Pos = self.Pos } = { val newAll = all filterNot matchesTag[T] if (newAll.isEmpty) pos.asInstanceOf[Attachments { type Pos = self.Pos }] else new NonemptyAttachments[Pos](this.pos, newAll) } def isEmpty: Boolean = true } // SI-7018: This used to be an inner class of `Attachments`, but that led to a memory leak in the // IDE via $outer pointers. private final class NonemptyAttachments[P >: Null](override val pos: P, override val all: Set[Any]) extends Attachments { type Pos = P def withPos(newPos: Pos) = new NonemptyAttachments(newPos, all) override def isEmpty: Boolean = false }
felixmulder/scala
src/reflect/scala/reflect/macros/Attachments.scala
Scala
bsd-3-clause
3,061
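A hedged sketch of the type-indexed slots the Attachments scaladoc describes, written from inside a blackbox macro; the ExpansionNote payload is invented, and the `c.internal.attachments` / `c.internal.updateAttachment` entry points are assumed to be the macro-writer API available since Scala 2.11:

import scala.reflect.macros.blackbox

// Invented payload type; any class can occupy its own type-indexed slot.
final case class ExpansionNote(text: String)

class AttachmentsUsage(val c: blackbox.Context) {
  import c.universe._

  def tag(tree: Tree): Tree = {
    // write the payload into its slot, then read it back via get[T]
    val tagged = c.internal.updateAttachment(tree, ExpansionNote("seen by AttachmentsUsage"))
    c.internal.attachments(tagged).get[ExpansionNote]
      .foreach(note => c.info(tagged.pos, note.text, force = false))
    tagged
  }
}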
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.scalar.tests.examples import org.apache.ignite.scalar.examples._ import org.apache.ignite.scalar.scalar import org.apache.ignite.testframework.junits.common.GridAbstractExamplesTest import org.scalatest.junit.JUnitSuiteLike /** * Scalar examples self test. */ class ScalarExamplesSelfTest extends GridAbstractExamplesTest with JUnitSuiteLike { /** */ private def EMPTY_ARGS = Array.empty[String] /** */ def testScalarCacheAffinitySimpleExample() { ScalarCacheAffinityExample.main(EMPTY_ARGS) } /** */ def testScalarCacheEntryProcessorExample() { ScalarCacheEntryProcessorExample.main(EMPTY_ARGS) } /** */ def testScalarCacheExample() { ScalarCacheExample.main(EMPTY_ARGS) } /** */ def testScalarCacheQueryExample() { ScalarCacheQueryExample.main(EMPTY_ARGS) } /** */ def testScalarClosureExample() { ScalarClosureExample.main(EMPTY_ARGS) } /** */ def testScalarContinuationExample() { ScalarContinuationExample.main(EMPTY_ARGS) } /** */ def testScalarCreditRiskExample() { ScalarCreditRiskExample.main(EMPTY_ARGS) } /** */ def testScalarPingPongExample() { scalar("modules/scalar/src/test/resources/spring-ping-pong-partner.xml") { ScalarPingPongExample.main(EMPTY_ARGS) } } /** */ def testScalarPopularNumbersRealTimeExample() { ScalarCachePopularNumbersExample.main(EMPTY_ARGS) } /** */ def testScalarPrimeExample() { ScalarPrimeExample.main(EMPTY_ARGS) } /** */ def testScalarTaskExample() { ScalarTaskExample.main(EMPTY_ARGS) } /** */ def testScalarWorldShortestMapReduceExample() { ScalarWorldShortestMapReduce.main(EMPTY_ARGS) } /** */ def testScalarSnowflakeSchemaExample() { ScalarSnowflakeSchemaExample.main(EMPTY_ARGS) } }
tkpanther/ignite
examples/src/test/scala/org/apache/ignite/scalar/tests/examples/ScalarExamplesSelfTest.scala
Scala
apache-2.0
2,768
package scutil.gui import java.io._ import java.awt.dnd._ import java.awt.datatransfer._ import javax.swing._ import scutil.lang._ import scutil.jdk.implicits._ import scutil.platform._ import scutil.geom._ import scutil.gui.implicits._ object DndFileExport { // TODO using this is a Using def install(target:JComponent, provider:IntPoint=>Option[Nes[File]]):Disposer = { val dragGestureListener = new DragGestureListener { def dragGestureRecognized(ev:DragGestureEvent):Unit = { provider(ev.getDragOrigin.toIntPoint) .map { new FileTransferable(_) } .foreach { it => ev.startDrag(DragSource.DefaultCopyDrop, it) } } } val dragSource = DragSource.getDefaultDragSource // NOTE allowing ACTION_COPY_OR_MOVE (at least on linux) leads to a MOVE with nautilus val dragGestureRecognizer = dragSource.createDefaultDragGestureRecognizer( target, DnDConstants.ACTION_COPY, dragGestureListener ) Disposer delay { dragGestureRecognizer setComponent null } } private val exportable:Seq[DataFlavor] = OperatingSystem.current match { case Some(OSX) => Seq(DndFlavors.javaFileList) case Some(Windows) => Seq(DndFlavors.javaFileList) case Some(Linux) => Seq( DndFlavors.javaFileList, DndFlavors.uriList, DndFlavors.url // DndFlavors.binaryFlavor ) case None => Seq.empty } private final class FileTransferable(files:Nes[File]) extends Transferable { def isDataFlavorSupported(flavor:DataFlavor):Boolean = getTransferDataFlavors contains flavor def getTransferDataFlavors:Array[DataFlavor] = exportable.toArray def getTransferData(flavor:DataFlavor):AnyRef = flavor match { case DndFlavors.javaFileList => // NOTE this is the only one that actually can transfer multiple files files.toVector.toJList case DndFlavors.uriList => // NOTE this results in file:/tmp/path instead of file:///tmp/path files.head.toURI.toASCIIString + "\r\n" case DndFlavors.url => // NOTE does not work on windows files.head.toURI.toURL case DndFlavors.binary => // NOTE doesn't give a file name // NOTE does not work on windows files.head.newInputStream() case x => sys error s"unexpected DataFlavor ${x.toString}" } } }
ritschwumm/scutil
modules/gui/src/main/scala/scutil/gui/DndFileExport.scala
Scala
bsd-2-clause
2,319
package chana.jpql.nodes import xtc.tree.Node class JPQLParser(rootNode: Node) { private var indentLevel = 0 protected var nodePath = List[Node]() protected var _errors = List[Node]() // root def visitRoot() = { JPQL(rootNode) } protected def enter(node: Node): Unit = { indentLevel += 1 nodePath ::= node // push } protected def exit(node: Node) { indentLevel -= 1 nodePath = nodePath.tail // pop } // -------- general node visit method private def visit[T](node: Node)(body: Node => T): T = { enter(node) val res = body(node) exit(node) res } private def visitOpt[T](node: Node)(body: Node => T): Option[T] = { if (node eq null) None else Some(visit(node)(body)) } private def visitList[T](nodes: xtc.util.Pair[Node])(body: Node => T): List[T] = { if (nodes eq null) Nil else { var rs = List[T]() val xs = nodes.iterator while (xs.hasNext) { rs ::= visit(xs.next)(body) } rs.reverse } } // ========================================================================= def JPQL(node: Node) = { val n = node.getNode(0) n.getName match { case "SelectStatement" => visit(n)(selectStatement) case "UpdateStatement" => visit(n)(updateStatement) case "DeleteStatement" => visit(n)(deleteStatement) } } /*- SelectClause FromClause WhereClause? GroupbyClause? HavingClause? OrderbyClause? */ def selectStatement(node: Node) = { val select = visit(node.getNode(0))(selectClause) val from = visit(node.getNode(1))(fromClause) val where = visitOpt(node.getNode(2))(whereClause) val groupby = visitOpt(node.getNode(3))(groupbyClause) val having = visitOpt(node.getNode(4))(havingClause) val orderby = visitOpt(node.getNode(5))(orderbyClause) SelectStatement(select, from, where, groupby, having, orderby) } /*- UpdateClause SetClause WhereClause? */ def updateStatement(node: Node) = { val update = visit(node.getNode(0))(updateClause) val set = visit(node.getNode(1))(setClause) val where = visitOpt(node.getNode(2))(whereClause) UpdateStatement(update, set, where) } /*- UPDATE EntityName ( AS? Ident )? */ def updateClause(node: Node) = { val entity = visit(node.getNode(0))(entityName) val as = visitOpt(node.getNode(1))(ident) UpdateClause(entity, as) } /*- SET SetAssignClause ( COMMA SetAssignClause )* */ def setClause(node: Node) = { val assign = visit(node.getNode(0))(setAssignClause) val assigns = visitList(node.getList(1))(setAssignClause) SetClause(assign, assigns) } /*- SetAssignTarget EQ NewValue */ def setAssignClause(node: Node) = { val target = visit(node.getNode(0))(setAssignTarget) val newVal = visit(node.getNode(1))(newValue) SetAssignClause(target, newVal) } /*- PathExpr / Attribute */ def setAssignTarget(node: Node) = { val n = node.getNode(0) val target = n.getName match { case "PathExpr" => Left(visit(n)(pathExpr)) case "Attribute" => Right(visit(n)(attribute)) } SetAssignTarget(target) } /*- ScalarExpr / NULL */ def newValue(node: Node) = { val v = if (node.isEmpty) null else { visit(node.getNode(0))(scalarExpr) } NewValue(v) } /*- DeleteClause WhereClause? */ def deleteStatement(node: Node) = { val delete = visit(node.getNode(0))(deleteClause) val where = visitOpt(node.getNode(1))(whereClause) DeleteStatement(delete, where) } /*- DELETE FROM EntityName ( AS? Ident )? */ def deleteClause(node: Node) = { val name = visit(node.getNode(0))(entityName) val as = visitOpt(node.getNode(1))(ident) DeleteClause(name, as) } /*- SELECT DISTINCT? 
SelectItem (COMMA SelectItem )* */ def selectClause(node: Node) = { val isDistinct = node.get(0) ne null val item = visit(node.getNode(1))(selectItem) val items = visitList(node.getList(2))(selectItem) SelectClause(isDistinct, item, items) } /*- SelectExpr ( AS? Ident )? */ def selectItem(node: Node) = { val item = visit(node.getNode(0))(selectExpr) val as = visitOpt(node.getNode(1))(ident) SelectItem(item, as) } /*- AggregateExpr / ScalarExpr / OBJECT LParen VarAccessOrTypeConstant RParen / ConstructorExpr / MapEntryExpr */ def selectExpr(node: Node): SelectExpr = { val n = node.getNode(0) n.getName match { case "AggregateExpr" => SelectExpr_AggregateExpr(visit(n)(aggregateExpr)) case "ScalarExpr" => SelectExpr_ScalarExpr(visit(n)(scalarExpr)) case "VarAccessOrTypeConstant" => SelectExpr_OBJECT(visit(n)(varAccessOrTypeConstant)) case "ConstructorExpr" => SelectExpr_ConstructorExpr(visit(n)(constructorExpr)) case "MapEntryExpr" => SelectExpr_MapEntryExpr(visit(n)(mapEntryExpr)) } } /*- ENTRY LParen VarAccessOrTypeConstant RParen */ def mapEntryExpr(node: Node) = { val entry = visit(node.getNode(0))(varAccessOrTypeConstant) MapEntryExpr(entry) } /*- QualIdentVar ( DOT Attribute )* */ def pathExprOrVarAccess(node: Node) = { val qual = visit(node.getNode(0))(qualIdentVar) val attributes = visitList(node.getList(1))(attribute) PathExprOrVarAccess(qual, attributes) } /*- VarAccessOrTypeConstant / KEY LParen VarAccessOrTypeConstant RParen / VALUE LParen VarAccessOrTypeConstant RParen */ def qualIdentVar(node: Node) = { node.size match { case 1 => QualIdentVar_VarAccessOrTypeConstant(visit(node.getNode(0))(varAccessOrTypeConstant)) case 2 => node.getString(0) match { case "key" => QualIdentVar_KEY(visit(node.getNode(1))(varAccessOrTypeConstant)) case "value" => QualIdentVar_VALUE(visit(node.getNode(1))(varAccessOrTypeConstant)) } } } /*- AVG LParen DISTINCT? ScalarExpr RParen / MAX LParen DISTINCT? ScalarExpr RParen / MIN LParen DISTINCT? ScalarExpr RParen / SUM LParen DISTINCT? ScalarExpr RParen / COUNT LParen DISTINCT? 
ScalarExpr RParen */ def aggregateExpr(node: Node) = { val isDistinct = node.get(1) eq null val expr = visit(node.getNode(2))(scalarExpr) node.getString(0) match { case "avg" => AggregateExpr_AVG(isDistinct, expr) case "max" => AggregateExpr_MAX(isDistinct, expr) case "min" => AggregateExpr_MIN(isDistinct, expr) case "sum" => AggregateExpr_SUM(isDistinct, expr) case "count" => AggregateExpr_COUNT(isDistinct, expr) } } /*- NEW ConstructorName LParen ConstructorItem ( COMMA ConstructorItem )* RParen */ def constructorExpr(node: Node) = { val name = visit(node.getNode(0))(constructorName) val arg = visit(node.getNode(1))(constructorItem) val args = visitList(node.getList(2))(constructorItem) ConstructorExpr(name, arg, args) } /*- Ident ( DOT Ident )* */ def constructorName(node: Node) = { val field = visit(node.getNode(0))(ident) val paths = visitList(node.getList(1))(ident) ConstructorName(field, paths) } /*- ScalarExpr / AggregateExpr */ def constructorItem(node: Node) = { val n = node.getNode(0) n.getName match { case "ScalarExpr" => ConstructorItem_ScalarExpr(visit(n)(scalarExpr)) case "AggregateExpr" => ConstructorItem_AggregateExpr(visit(n)(aggregateExpr)) } } /*- FROM IdentVarDecl ( COMMA ( IdentVarDecl / CollectionMemberDecl) )* */ def fromClause(node: Node) = { val from = visit(node.getNode(0))(identVarDecl) val froms = visitList(node.getList(1)) { n => n.getName match { case "IdentVarDecl" => Left(visit(n)(identVarDecl)) case "CollectionMemberDecl" => Right(visit(n)(collectionMemberDecl)) } } FromClause(from, froms) } /*- RangeVarDecl Join* */ def identVarDecl(node: Node) = { val varDecl = visit(node.getNode(0))(rangeVarDecl) val joins = visitList(node.getList(1))(join) IdentVarDecl(varDecl, joins) } /*- EntityName AS? Ident */ def rangeVarDecl(node: Node) = { val entity = visit(node.getNode(0))(entityName) val as = visit(node.getNode(1))(ident) RangeVarDecl(entity, as) } /*- Identifier */ def entityName(node: Node) = { val name = node.getString(0) EntityName(name) } /*- JoinSpec JoinAssocPathExpr AS? Ident JoinCond? / JoinSpec TREAT LParen JoinAssocPathExpr AS Ident RParen AS? Ident JoinCond? / JoinSpec FETCH JoinAssocPathExpr Ident? JoinCond? */ def join(node: Node) = { val spec = visit(node.getNode(0))(joinSpec) node.size match { case 4 => val expr = visit(node.getNode(1))(joinAssocPathExpr) val as = visit(node.getNode(2))(ident) val cond = visitOpt(node.getNode(3))(joinCond) Join_General(spec, expr, as, cond) case 6 => val expr = visit(node.getNode(2))(joinAssocPathExpr) val exprAs = visit(node.getNode(3))(ident) val as = visit(node.getNode(4))(ident) val cond = visitOpt(node.getNode(5))(joinCond) Join_TREAT(spec, expr, exprAs, as, cond) case 5 => val expr = visit(node.getNode(2))(joinAssocPathExpr) val as = visitOpt(node.getNode(3))(ident) val cond = visitOpt(node.getNode(4))(joinCond) Join_FETCH(spec, expr, as, cond) } } /*- JOIN / LEFT JOIN / LEFT OUTER JOIN / INNER JOIN */ def joinSpec(node: Node) = { node.size match { case 1 => JOIN case 2 => node.getString(0) match { case "left" => LEFT_JOIN case "inner" => INNER_JOIN } case 3 => LEFT_OUTER_JOIN } } /*- ON CondExpr */ def joinCond(node: Node) = { val expr = visit(node.getNode(0))(condExpr) JoinCond(expr) } /*- IN LParen CollectionValuedPathExpr RParen AS? 
Ident */ def collectionMemberDecl(node: Node) = { val expr = visit(node.getNode(0))(collectionValuedPathExpr) val as = visit(node.getNode(1))(ident) CollectionMemberDecl(expr, as) } /*- PathExpr */ def collectionValuedPathExpr(node: Node) = { val expr = visit(node.getNode(0))(pathExpr) CollectionValuedPathExpr(expr) } /*- PathExpr */ def assocPathExpr(node: Node) = { val expr = visit(node.getNode(0))(pathExpr) AssocPathExpr(expr) } /*- QualIdentVar ( DOT Attribute )* */ def joinAssocPathExpr(node: Node) = { val qual = visit(node.getNode(0))(qualIdentVar) val attributes = visitList(node.getList(1))(attribute) JoinAssocPathExpr(qual, attributes) } /*- PathExpr */ def singleValuedPathExpr(node: Node) = { val expr = visit(node.getNode(0))(pathExpr) SingleValuedPathExpr(expr) } /*- PathExpr */ def stateFieldPathExpr(node: Node) = { val expr = visit(node.getNode(0))(pathExpr) StateFieldPathExpr(expr) } /*- QualIdentVar ( DOT Attribute )+ */ def pathExpr(node: Node) = { val qual = visit(node.getNode(0))(qualIdentVar) val attributes = visitList(node.getList(1))(attribute) PathExpr(qual, attributes) } /*- AttributeName */ def attribute(node: Node) = { val name = node.getString(0) Attribute(name) } /*- Ident */ def varAccessOrTypeConstant(node: Node) = { val id = visit(node.getNode(0))(ident) VarAccessOrTypeConstant(id) } /*- WHERE CondExpr */ def whereClause(node: Node) = { val expr = visit(node.getNode(0))(condExpr) WhereClause(expr) } /*- CondTerm ( OR CondTerm )* */ def condExpr(node: Node): CondExpr = { val term = visit(node.getNode(0))(condTerm) val andTerms = visitList(node.getList(1))(condTerm) CondExpr(term, andTerms) } /*- CondFactor ( AND CondFactor )* */ def condTerm(node: Node) = { val factor = visit(node.getNode(0))(condFactor) val andFactors = visitList(node.getList(1))(condFactor) CondTerm(factor, andFactors) } /*- NOT? ( CondPrimary / ExistsExpr ) */ def condFactor(node: Node) = { val not = node.get(0) ne null val n = node.getNode(1) val expr = n.getName match { case "CondPrimary" => Left(visit(n)(condPrimary)) case "ExistsExpr" => Right(visit(n)(existsExpr)) } CondFactor(not, expr) } /*- LParen CondExpr RParen / SimpleCondExpr */ def condPrimary(node: Node) = { val n = node.getNode(0) n.getName match { case "CondExpr" => CondPrimary_CondExpr(visit(n)(condExpr)) case "SimpleCondExpr" => CondPrimary_SimpleCondExpr(visit(n)(simpleCondExpr)) } } /*- ArithExpr SimpleCondExprRem / NonArithScalarExpr SimpleCondExprRem */ def simpleCondExpr(node: Node) = { val n = node.getNode(0) val expr = n.getName match { case "ArithExpr" => Left(visit(n)(arithExpr)) case "NonArithScalarExpr" => Right(visit(n)(nonArithScalarExpr)) } val rem = visit(node.getNode(1))(simpleCondExprRem) SimpleCondExpr(expr, rem) } /*- ComparisonExpr / NOT? CondWithNotExpr / IS NOT? 
IsExpr */ def simpleCondExprRem(node: Node) = { node.get(0) match { case n: Node => SimpleCondExprRem_ComparisonExpr(visit(n)(comparisonExpr)) case null => val n2 = node.getNode(1) n2.getName match { case "CondWithNotExpr" => SimpleCondExprRem_CondWithNotExpr(false, visit(n2)(condWithNotExpr)) case "IsExpr" => SimpleCondExprRem_IsExpr(false, visit(n2)(isExpr)) } case "not" => val n2 = node.getNode(1) n2.getName match { case "CondWithNotExpr" => SimpleCondExprRem_CondWithNotExpr(true, visit(n2)(condWithNotExpr)) case "IsExpr" => SimpleCondExprRem_IsExpr(true, visit(n2)(isExpr)) } } } /*- BetweenExpr / LikeExpr / InExpr / CollectionMemberExpr */ def condWithNotExpr(node: Node) = { val n = node.getNode(0) n.getName match { case "BetweenExpr" => CondWithNotExpr_BetweenExpr(visit(n)(betweenExpr)) case "LikeExpr" => CondWithNotExpr_LikeExpr(visit(n)(likeExpr)) case "InExpr" => CondWithNotExpr_InExpr(visit(n)(inExpr)) case "CollectionMemberExpr" => CondWithNotExpr_CollectionMemberExpr(visit(n)(collectionMemberExpr)) } } /*- NullComparisonExpr / EmptyCollectionComparisonExpr */ def isExpr(node: Node) = { node.getNode(0).getName match { case "NullComparisonExpr" => IsNullExpr case "EmptyCollectionComparisonExpr" => IsEmptyExpr } } /*- BETWEEN ScalarOrSubselectExpr AND ScalarOrSubselectExpr */ def betweenExpr(node: Node) = { val minExpr = visit(node.getNode(0))(scalarOrSubselectExpr) val maxExpr = visit(node.getNode(1))(scalarOrSubselectExpr) BetweenExpr(minExpr, maxExpr) } /*- IN InputParam / IN LParen ScalarOrSubselectExpr ( COMMA ScalarOrSubselectExpr )* RParen / IN LParen Subquery RParen */ def inExpr(node: Node) = { val n = node.getNode(0) n.getName match { case "InputParam" => InExpr_InputParam(visit(n)(inputParam)) case "ScalarOrSubselectExpr" => InExpr_ScalarOrSubselectExpr(visit(n)(scalarOrSubselectExpr), visitList(node.getList(1))(scalarOrSubselectExpr)) case "Subquery" => InExpr_Subquery(visit(n)(subquery)) } } /*- LIKE ScalarOrSubselectExpr Escape? */ def likeExpr(node: Node) = { val like = visit(node.getNode(0))(scalarOrSubselectExpr) val esc = visitOpt(node.getNode(1))(escape) LikeExpr(like, esc) } /*- ESCAPE ScalarExpr */ def escape(node: Node) = { val expr = visit(node.getNode(0))(scalarExpr) Escape(expr) } /*- MEMBER OF? 
CollectionValuedPathExpr */ def collectionMemberExpr(node: Node) = { val expr = visit(node.getNode(0))(collectionValuedPathExpr) CollectionMemberExpr(expr) } /*- EXISTS LParen Subquery RParen */ def existsExpr(node: Node) = { val subq = visit(node.getNode(0))(subquery) ExistsExpr(subq) } /*- EQ ComparisonExprRightOperand / NE ComparisonExprRightOperand / GT ComparisonExprRightOperand / GE ComparisonExprRightOperand / LT ComparisonExprRightOperand / LE ComparisonExprRightOperand */ def comparisonExpr(node: Node) = { val op = node.getString(0) match { case "=" => EQ case "<>" => NE case ">" => GT case ">=" => GE case "<" => LT case "<=" => LE } val right = visit(node.getNode(1))(comparisonExprRightOperand) ComparisonExpr(op, right) } /*- ArithExpr / NonArithScalarExpr / AnyOrAllExpr */ def comparisonExprRightOperand(node: Node) = { val n = node.getNode(0) n.getName match { case "ArithExpr" => ComparsionExprRightOperand_ArithExpr(visit(n)(arithExpr)) case "NonArithScalarExpr" => ComparsionExprRightOperand_NonArithScalarExpr(visit(n)(nonArithScalarExpr)) case "AnyOrAllExpr" => ComparsionExprRightOperand_AnyOrAllExpr(visit(n)(anyOrAllExpr)) } } /*- SimpleArithExpr / LParen Subquery RParen */ def arithExpr(node: Node) = { val n = node.getNode(0) val expr = n.getName match { case "SimpleArithExpr" => Left(visit(n)(simpleArithExpr)) case "Subquery" => Right(visit(n)(subquery)) } ArithExpr(expr) } /*- ArithTerm ( ArithTermPlus / ArithTermMinus )* */ def simpleArithExpr(node: Node) = { val term = visit(node.getNode(0))(arithTerm) val terms = visitList(node.getList(1)) { n => n.getName match { case "ArithTermPlus" => ArithTerm_Plus(visit(n.getNode(0))(arithTerm)) case "ArithTermMinus" => ArithTerm_Minus(visit(n.getNode(0))(arithTerm)) } } SimpleArithExpr(term, terms) } /*- ArithFactor ( ArithFactorMultiply / ArithFactorDivide )* */ def arithTerm(node: Node): ArithTerm = { val factor = visit(node.getNode(0))(arithFactor) val factors = visitList(node.getList(1)) { n => n.getName match { case "ArithFactorMultiply" => ArithFactor_Multiply(visit(n.getNode(0))(arithFactor)) case "ArithFactorDivide" => ArithFactor_Divide(visit(n.getNode(0))(arithFactor)) } } ArithTerm(factor, factors) } /*- ArithPrimaryPlus / ArithPrimaryMinus / ArithPrimary */ def arithFactor(node: Node) = { val n = node.getNode(0) val primary = n.getName match { case "ArithPrimaryPlus" => ArithPrimary_Plus(visit(n.getNode(0))(arithPrimary)) case "ArithPrimaryMinus" => ArithPrimary_Minus(visit(n.getNode(0))(arithPrimary)) case "ArithPrimary" => ArithPrimary_Plus(visit(n)(arithPrimary)) // default "+" } ArithFactor(primary) } /*- PathExprOrVarAccess / InputParam / CaseExpr / FuncsReturningNumerics / LParen SimpleArithExpr RParen / LiteralNumeric */ def arithPrimary(node: Node) = { node.get(0) match { case n: Node => n.getName match { case "PathExprOrVarAccess" => ArithPrimary_PathExprOrVarAccess(visit(n)(pathExprOrVarAccess)) case "InputParam" => ArithPrimary_InputParam(visit(n)(inputParam)) case "CaseExpr" => ArithPrimary_CaseExpr(visit(n)(caseExpr)) case "FuncsReturningNumerics" => ArithPrimary_FuncsReturningNumerics(visit(n)(funcsReturningNumerics)) case "SimpleArithExpr" => ArithPrimary_SimpleArithExpr(visit(n)(simpleArithExpr)) } case v: Number => ArithPrimary_LiteralNumeric(v) } } /*- SimpleArithExpr / NonArithScalarExpr */ def scalarExpr(node: Node): ScalarExpr = { val n = node.getNode(0) n.getName match { case "SimpleArithExpr" => ScalarExpr_SimpleArithExpr(visit(n)(simpleArithExpr)) case "NonArithScalarExpr" => 
ScalarExpr_NonArithScalarExpr(visit(n)(nonArithScalarExpr)) } } /*- ArithExpr / NonArithScalarExpr */ def scalarOrSubselectExpr(node: Node) = { val n = node.getNode(0) n.getName match { case "ArithExpr" => ScalarOrSubselectExpr_ArithExpr(visit(n)(arithExpr)) case "NonArithScalarExpr" => ScalarOrSubselectExpr_NonArithScalarExpr(visit(n)(nonArithScalarExpr)) } } /*- FuncsReturningDatetime / FuncsReturningStrings / LiteralString / LiteralBoolean / LiteralTemporal / EntityTypeExpr */ def nonArithScalarExpr(node: Node) = { node.get(0) match { case n: Node => n.getName match { case "FuncsReturningDatetime" => NonArithScalarExpr_FuncsReturningDatetime(visit(n)(funcsReturningDatetime)) case "FuncsReturningStrings" => NonArithScalarExpr_FuncsReturningStrings(visit(n)(funcsReturningStrings)) case "EntityTypeExpr" => NonArithScalarExpr_EntityTypeExpr(visit(n)(entityTypeExpr)) } case v: java.lang.String => NonArithScalarExpr_LiteralString(v) case v: java.lang.Boolean => NonArithScalarExpr_LiteralBoolean(v) case v: java.time.temporal.Temporal => NonArithScalarExpr_LiteralTemporal(v) } } /*- ALL LParen Subquery RParen / ANY LParen Subquery RParen / SOME LParen Subquery RParen */ def anyOrAllExpr(node: Node) = { val anyOrAll = node.getString(0) match { case "all" => ALL case "any" => ANY case "some" => SOME } val subq = visit(node.getNode(1))(subquery) AnyOrAllExpr(anyOrAll, subq) } /*- TypeDiscriminator */ def entityTypeExpr(node: Node) = { val tp = visit(node.getNode(0))(typeDiscriminator) EntityTypeExpr(tp) } /*- TYPE LParen VarOrSingleValuedPath RParen / TYPE LParen InputParam RParen */ def typeDiscriminator(node: Node) = { val n = node.getNode(0) val expr = n.getName match { case "VarOrSingleValuedPath" => Left(visit(n)(varOrSingleValuedPath)) case "InputParam" => Right(visit(n)(inputParam)) } TypeDiscriminator(expr) } /*- SimpleCaseExpr / GeneralCaseExpr / CoalesceExpr / NullifExpr */ def caseExpr(node: Node) = { val n = node.getNode(0) n.getName match { case "SimpleCaseExpr" => CaseExpr_SimpleCaseExpr(visit(n)(simpleCaseExpr)) case "GeneralCaseExpr" => CaseExpr_GeneralCaseExpr(visit(n)(generalCaseExpr)) case "CoalesceExpr" => CaseExpr_CoalesceExpr(visit(n)(coalesceExpr)) case "NullifExpr" => CaseExpr_NullifExpr(visit(n)(nullifExpr)) } } /*- CASE CaseOperand SimpleWhenClause SimpleWhenClause* ELSE ScalarExpr END */ def simpleCaseExpr(node: Node) = { val operand = visit(node.getNode(0))(caseOperand) val when = visit(node.getNode(1))(simpleWhenClause) val whens = visitList(node.getList(2))(simpleWhenClause) val elseExpr = visit(node.getNode(3))(scalarExpr) SimpleCaseExpr(operand, when, whens, elseExpr) } /*- CASE WhenClause WhenClause* ELSE ScalarExpr END */ def generalCaseExpr(node: Node) = { val when = visit(node.getNode(0))(whenClause) val whens = visitList(node.getList(1))(whenClause) val elseExpr = visit(node.getNode(2))(scalarExpr) GeneralCaseExpr(when, whens, elseExpr) } /*- COALESCE LParen ScalarExpr ( COMMA ScalarExpr )+ RParen */ def coalesceExpr(node: Node) = { val expr = visit(node.getNode(0))(scalarExpr) val exprs = visitList(node.getList(1))(scalarExpr) CoalesceExpr(expr, exprs) } /*- NULLIF LParen ScalarExpr COMMA ScalarExpr RParen */ def nullifExpr(node: Node) = { val expr = visit(node.getNode(0))(scalarExpr) val rightExpr = visit(node.getNode(1))(scalarExpr) NullifExpr(expr, rightExpr) } /*- StateFieldPathExpr / TypeDiscriminator */ def caseOperand(node: Node) = { val n = node.getNode(0) val expr = n.getName match { case "StateFieldPathExpr" => Left(visit(n)(stateFieldPathExpr)) case 
"TypeDiscriminator" => Right(visit(n)(typeDiscriminator)) } CaseOperand(expr) } /*- WHEN CondExpr THEN ScalarExpr */ def whenClause(node: Node) = { val when = visit(node.getNode(0))(condExpr) val thenExpr = visit(node.getNode(1))(scalarExpr) WhenClause(when, thenExpr) } /*- WHEN ScalarExpr THEN ScalarExpr */ def simpleWhenClause(node: Node) = { val when = visit(node.getNode(0))(scalarExpr) val thenExpr = visit(node.getNode(1))(scalarExpr) SimpleWhenClause(when, thenExpr) } /*- SingleValuedPathExpr / VarAccessOrTypeConstant */ def varOrSingleValuedPath(node: Node) = { val n = node.getNode(0) val expr = n.getName match { case "SingleValuedPathExpr" => Left(visit(n)(singleValuedPathExpr)) case "VarAccessOrTypeConstant" => Right(visit(n)(varAccessOrTypeConstant)) } VarOrSingleValuedPath(expr) } /*- LiteralString / FuncsReturningStrings / InputParam / StateFieldPathExpr */ def stringPrimary(node: Node) = { node.get(0) match { case v: String => StringPrimary_LiteralString(v) case n: Node => n.getName match { case "FuncsReturningStrings" => StringPrimary_FuncsReturningStrings(visit(n)(funcsReturningStrings)) case "InputParam" => StringPrimary_InputParam(visit(n)(inputParam)) case "StateFieldPathExpr" => StringPrimary_StateFieldPathExpr(visit(n)(stateFieldPathExpr)) } } } /*- LiteralNumeric / LiteralBoolean / LiteralString */ def literal(node: Node): AnyRef = { node.get(0) match { case v: java.lang.Integer => v case v: java.lang.Long => v case v: java.lang.Float => v case v: java.lang.Double => v case v: java.lang.Boolean => v case v: java.lang.String => v } } /*- namedInputParam / positionInputParam String namedInputParam = void:':' v:identifier { yyValue = v; } ; Integer positionInputParam = void:'?' v:position { yyValue = Integer.parseInt(v, 10); } ; transient String position = [1-9] [0-9]* ; void:':' v:identifier { yyValue = v; } / void:'?' v:position { yyValue = Integer.parseInt(v, 10); } */ def inputParam(node: Node) = { node.get(0) match { case v: java.lang.String => InputParam_Named(v) case v: java.lang.Integer => InputParam_Position(v) } } /*- Abs / Length / Mod / Locate / Size / Sqrt / Index / Func */ def funcsReturningNumerics(node: Node) = { val n = node.getNode(0) n.getName match { case "Abs" => visit(n)(abs) case "Length" => visit(n)(length) case "Mod" => visit(n)(mod) case "Locate" => visit(n)(locate) case "Size" => visit(n)(size) case "Sqrt" => visit(n)(sqrt) case "Index" => visit(n)(index) case "Func" => visit(n)(func) } } /*- CURRENT_DATE / CURRENT_TIME / CURRENT_TIMESTAMP */ def funcsReturningDatetime(node: Node) = { node.getString(0) match { case "current_date" => CURRENT_DATE case "current_time" => CURRENT_TIME case "current_timestamp" => CURRENT_TIMESTAMP } } /*- Concat / Substring / Trim / Upper / Lower */ def funcsReturningStrings(node: Node): FuncsReturningStrings = { val n = node.getNode(0) n.getName match { case "Concat" => visit(n)(concat) case "Substring" => visit(n)(substring) case "Trim" => visit(n)(trim) case "Upper" => visit(n)(upper) case "Lower" => visit(n)(lower) } } /*- CONCAT LParen ScalarExpr (COMMA ScalarExpr)+ RParen */ def concat(node: Node) = { val expr = visit(node.getNode(0))(scalarExpr) val exprs = visitList(node.getList(1))(scalarExpr) Concat(expr, exprs) } /*- SUBSTRING LParen ScalarExpr COMMA ScalarExpr ( COMMA ScalarExpr )? 
RParen */ def substring(node: Node) = { val expr = visit(node.getNode(0))(scalarExpr) val expr2 = visit(node.getNode(1))(scalarExpr) val expr3 = visitOpt(node.getNode(2))(scalarExpr) Substring(expr, expr2, expr3) } /*- TRIM LParen TrimSpec? TrimChar? FROM? StringPrimary RParen */ def trim(node: Node) = { val spec = visitOpt(node.getNode(0))(trimSpec) // default BOTH val char = visitOpt(node.getNode(1))(trimChar) val from = visit(node.getNode(2))(stringPrimary) Trim(spec, char, from) } /*- LEADING / TRAILING / BOTH */ def trimSpec(node: Node) = { node.getString(0) match { case "leading" => LEADING case "trailing" => TRAILING case "both" => BOTH } } /*- LiteralSingleQuotedString / InputParam */ def trimChar(node: Node) = { node.get(0) match { case n: Node => TrimChar_InputParam(visit(n)(inputParam)) case v: String => TrimChar_String(v) } } /*- UPPER LParen ScalarExpr RParen */ def upper(node: Node) = { val expr = visit(node.getNode(0))(scalarExpr) Upper(expr) } /*- LOWER LParen ScalarExpr RParen */ def lower(node: Node) = { val expr = visit(node.getNode(0))(scalarExpr) Lower(expr) } /*- ABS LParen SimpleArithExpr RParen */ def abs(node: Node) = { val expr = visit(node.getNode(0))(simpleArithExpr) Abs(expr) } /*- LENGTH LParen ScalarExpr RParen */ def length(node: Node) = { val expr = visit(node.getNode(0))(scalarExpr) Length(expr) } /*- LOCATE LParen ScalarExpr COMMA ScalarExpr ( COMMA ScalarExpr )? RParen */ def locate(node: Node) = { val expr = visit(node.getNode(0))(scalarExpr) val searchExpr = visit(node.getNode(1))(scalarExpr) val startExpr = visitOpt(node.getNode(2))(scalarExpr) Locate(expr, searchExpr, startExpr) } /*- SIZE LParen CollectionValuedPathExpr RParen */ def size(node: Node) = { val expr = visit(node.getNode(0))(collectionValuedPathExpr) Size(expr) } /*- MOD LParen ScalarExpr COMMA ScalarExpr RParen */ def mod(node: Node) = { val expr = visit(node.getNode(0))(scalarExpr) val divisorExpr = visit(node.getNode(1))(scalarExpr) Mod(expr, divisorExpr) } /*- SQRT LParen ScalarExpr RParen */ def sqrt(node: Node) = { val expr = visit(node.getNode(0))(scalarExpr) Sqrt(expr) } /*- INDEX LParen VarAccessOrTypeConstant RParen */ def index(node: Node) = { val expr = visit(node.getNode(0))(varAccessOrTypeConstant) Index(expr) } /*- FUNCTION LParen LiteralSingleQuotedString ( COMMA NewValue )* RParen */ def func(node: Node) = { val name = node.getString(0) val args = visitList(node.getList(1))(newValue) Func(name, args) } /*- SimpleSelectClause SubqueryFromClause WhereClause? GroupbyClause? HavingClause? */ def subquery(node: Node) = { val select = visit(node.getNode(0))(simpleSelectClause) val from = visit(node.getNode(1))(subqueryFromClause) val where = visitOpt(node.getNode(2))(whereClause) val groupby = visitOpt(node.getNode(3))(groupbyClause) val having = visitOpt(node.getNode(4))(havingClause) Subquery(select, from, where, groupby, having) } /*- SELECT DISTINCT? 
SimpleSelectExpr */ def simpleSelectClause(node: Node) = { val isDistinct = node.get(0) ne null val expr = visit(node.getNode(1))(simpleSelectExpr) SimpleSelectClause(isDistinct, expr) } /*- SingleValuedPathExpr / AggregateExpr / VarAccessOrTypeConstant */ def simpleSelectExpr(node: Node) = { val n = node.getNode(0) n.getName match { case "SingleValuedPathExpr" => SimpleSelectExpr_SingleValuedPathExpr(visit(n)(singleValuedPathExpr)) case "AggregateExpr" => SimpleSelectExpr_AggregateExpr(visit(n)(aggregateExpr)) case "VarAccessOrTypeConstant" => SimpleSelectExpr_VarAccessOrTypeConstant(visit(n)(varAccessOrTypeConstant)) } } /*- FROM SubselectIdentVarDecl ( COMMA ( SubselectIdentVarDecl / CollectionMemberDecl ) )* */ def subqueryFromClause(node: Node) = { val from = visit(node.getNode(0))(subselectIdentVarDecl) val froms = visitList(node.getList(1)) { n => n.getName match { case "SubselectIdentVarDecl" => Left(visit(n)(subselectIdentVarDecl)) case "CollectionMemberDecl" => Right(visit(n)(collectionMemberDecl)) } } SubqueryFromClause(from, froms) } /*- IdentVarDecl / AssocPathExpr AS? Ident / CollectionMemberDecl */ def subselectIdentVarDecl(node: Node) = { val n = node.getNode(0) n.getName match { case "IdentVarDecl" => SubselectIdentVarDecl_IdentVarDecl(visit(n)(identVarDecl)) case "AssoPathExpr" => SubselectIdentVarDecl_AssocPathExpr(visit(n)(assocPathExpr), visit(node.getNode(1))(ident)) case "CollectionMemberDecl" => SubselectIdentVarDecl_CollectionMemberDecl(visit(n)(collectionMemberDecl)) } } /*- ORDER BY OrderbyItem ( COMMA OrderbyItem )* */ def orderbyClause(node: Node) = { val item = visit(node.getNode(0))(orderbyItem) val items = visitList(node.getList(1))(orderbyItem) OrderbyClause(item, items) } /*- ( SimpleArithExpr / ScalarExpr ) ( ASC / DESC )? */ def orderbyItem(node: Node) = { val n = node.getNode(0) val expr = n.getName match { case "SimpleArithExpr" => Left(visit(n)(simpleArithExpr)) case "ScalarExpr" => Right(visit(n)(scalarExpr)) } val isAsc = node.getString(1) match { case null => true case "asc" => true case "desc" => false } OrderbyItem(expr, isAsc) } /*- GROUP BY ScalarExpr ( COMMA ScalarExpr )* */ def groupbyClause(node: Node) = { val expr = visit(node.getNode(0))(scalarExpr) val exprs = visitList(node.getList(1))(scalarExpr) GroupbyClause(expr, exprs) } /*- HAVING CondExpr */ def havingClause(node: Node) = { val expr = visit(node.getNode(0))(condExpr) HavingClause(expr) } /*- Identifier */ def ident(node: Node) = { val name = node.getString(0) Ident(name) } }
matthewtt/chana
src/main/scala/chana/jpql/nodes/JPQLParser.scala
Scala
apache-2.0
34,816
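// A minimal, self-contained sketch of the visit/visitOpt/visitList convention used by the
// JPQLParser above: an optional grammar child arrives as null and becomes an Option, a repeated
// child becomes a List. ToyNode and both helpers are hypothetical stand-ins, not the Rats! Node
// API, kept only to illustrate that convention.
object VisitorConventionSketch {
  final case class ToyNode(name: String, children: List[AnyRef])

  def visitOptChild[A](child: AnyRef)(f: ToyNode => A): Option[A] = child match {
    case n: ToyNode => Some(f(n))
    case _          => None // a missing optional child is represented as null
  }

  def visitListChildren[A](children: List[AnyRef])(f: ToyNode => A): List[A] =
    children.collect { case n: ToyNode => f(n) }

  def main(args: Array[String]): Unit = {
    // an "expr AS alias" style item where the alias child is absent (null)
    val item = ToyNode("SelectItem", List(ToyNode("ScalarExpr", Nil), null))
    println(visitOptChild(item.children(1))(_.name))  // None
    println(visitListChildren(item.children)(_.name)) // List(ScalarExpr)
  }
}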
package de.frosner.broccoli.nomad.models

import play.api.libs.functional.syntax._
import play.api.libs.json.{JsPath, Reads}
import shapeless.tag
import shapeless.tag.@@

/**
  * A partial model for a single allocation.
  */
final case class Allocation(
    id: String @@ Allocation.Id,
    jobId: String @@ Job.Id,
    nodeId: String @@ Node.Id,
    clientStatus: ClientStatus,
    taskStates: Map[String @@ Task.Name, TaskStateEvents]
)

object Allocation {
  trait Id

  implicit val allocationReads: Reads[Allocation] =
    ((JsPath \ "ID").read[String].map(tag[Allocation.Id](_)) and
      (JsPath \ "JobID").read[String].map(tag[Job.Id](_)) and
      (JsPath \ "NodeID").read[String].map(tag[Node.Id](_)) and
      (JsPath \ "ClientStatus").read[ClientStatus] and
      (JsPath \ "TaskStates")
        .readNullable[Map[String, TaskStateEvents]]
        // Tag all values as task name. Since Task.Name is a phantom type this is a safe thing to do, albeit it doesn't
        // look like so
        .map(_.getOrElse(Map.empty).asInstanceOf[Map[String @@ Task.Name, TaskStateEvents]]))(Allocation.apply _)
}
FRosner/cluster-broccoli
server/src/main/scala/de/frosner/broccoli/nomad/models/Allocation.scala
Scala
apache-2.0
1,113
/* * The MIT License (MIT) * * Copyright (c) 2017 Ángel Cervera Claudio * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ package com.acervera.osm4scala import java.io.InputStream import com.acervera.osm4scala.model.OSMEntity import org.openstreetmap.osmosis.osmbinary.fileformat.Blob /** * Iterable process all entities. */ trait EntityIterator extends Iterator[OSMEntity] /** * Factory to create EntityIterator objects from different sources. */ object EntityIterator { /** * Create an iterator to iterate over all entities in the InputStream. * * @param pbfInputStream InputStream object to process * @return Iterator */ def fromPbf(pbfInputStream: InputStream): EntityIterator = new FromPbfFileEntitiesIterator(new DefaultInputStreamSentinel(pbfInputStream)) /** * Create an iterator to iterate over all entities in the InputStream, * until the end of the stream or until the Sentinel stop it. * * @param pbfInputStream InputStream object to process with sentinel logic. * @return Iterator */ def fromPbf(pbfInputStream: InputStreamSentinel): EntityIterator = new FromPbfFileEntitiesIterator(pbfInputStream) /** * Create an iterator to iterate over all entities in th Blob. * * @param blob Blob object to process * @return Iterator */ def fromBlob(blob: Blob): EntityIterator = new FromBlobEntitiesIterator(blob) }
angelcervera/osm4scala
core/src/main/scala/com/acervera/osm4scala/EntityIterator.scala
Scala
mit
2,452
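// A minimal usage sketch of the EntityIterator.fromPbf(InputStream) factory above: stream a
// .osm.pbf file and count its entities in one pass. The file path is a placeholder and error
// handling is intentionally omitted.
import java.io.FileInputStream

import com.acervera.osm4scala.EntityIterator

object CountPbfEntities {
  def main(args: Array[String]): Unit = {
    val in = new FileInputStream("/tmp/extract.osm.pbf") // hypothetical local extract
    try {
      val count = EntityIterator.fromPbf(in).size // EntityIterator is an Iterator[OSMEntity]
      println(s"entities: $count")
    } finally {
      in.close()
    }
  }
}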
package com.twitter.finagle.netty4.http.handler import com.twitter.finagle.netty4.http.util.UriUtils import com.twitter.finagle.netty4.http.util.UriUtils.InvalidUriException import io.netty.channel.ChannelHandler.Sharable import io.netty.channel.{ChannelHandlerContext, ChannelInboundHandlerAdapter} import io.netty.handler.codec.DecoderResult import io.netty.handler.codec.http.{HttpObject, HttpRequest} /** * All inbound URIs are validated in the Netty pipeline so we can * reject malformed requests earlier, before they enter the Finagle land. This pipeline handler * does exactly this. Similar to other Netty components, it sets HTTP object's `DecoderResult` to a * failure so the next handler(s) can take an appropriate action (reject if it's a server; convert * to an exception if it's a client). */ @Sharable private[finagle] object UriValidatorHandler extends ChannelInboundHandlerAdapter { val HandlerName: String = "uriValidationHandler" override def channelRead(ctx: ChannelHandlerContext, msg: scala.Any): Unit = { msg match { case req: HttpRequest => validateUri(ctx, req, req.uri()) case _ => () } ctx.fireChannelRead(msg) } private[this] def validateUri(ctx: ChannelHandlerContext, obj: HttpObject, uri: String): Unit = if (!UriUtils.isValidUri(uri)) { obj.setDecoderResult(DecoderResult.failure(new InvalidUriException(uri))) } }
luciferous/finagle
finagle-netty4-http/src/main/scala/com/twitter/finagle/netty4/http/handler/UriValidatorHandler.scala
Scala
apache-2.0
1,413
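// A hedged wiring sketch for the sharable handler above: register it by its HandlerName on a
// Netty pipeline. EmbeddedChannel is Netty's in-memory channel, used here only for illustration,
// and the sketch sits in the same package because the handler is private[finagle].
package com.twitter.finagle.netty4.http.handler

import io.netty.channel.embedded.EmbeddedChannel
import io.netty.handler.codec.http.HttpServerCodec

object UriValidatorWiringSketch {
  def main(args: Array[String]): Unit = {
    val channel = new EmbeddedChannel(new HttpServerCodec())
    channel.pipeline().addLast(UriValidatorHandler.HandlerName, UriValidatorHandler)
    println(channel.pipeline().names()) // includes "uriValidationHandler" after the codec
  }
}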
/* * Copyright 2015 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.ibm.spark.utils import java.lang.reflect.Method import java.net.{URL, URLClassLoader} import java.util import org.slf4j.LoggerFactory import scala.collection.JavaConverters._ import scala.util.{Failure, Success, Try} import scala.language.existentials /** * Represents a class loader that supports delegating to multiple class loaders. * * @note Implements URLClassLoader purely to support the Guava requirement for * detecting all classes. * * @param urls The URLs to use for the underlying URLClassLoader * @param classLoaders The class loaders to use as the underlying * implementations of this class loader */ class MultiClassLoader( private val urls: Seq[URL], private val classLoaders: Seq[ClassLoader] ) extends URLClassLoader( classLoaders.flatMap({ case urlClassLoader: URLClassLoader => urlClassLoader.getURLs.toSeq case _ => Nil }).distinct.toArray, /* Create a parent chain based on a each classloader's parent */ { val parents = classLoaders.flatMap(cl => Option(cl.getParent)) // If multiple parents, set the parent to another multi class loader if (parents.size > 1) new MultiClassLoader(Nil, parents) // If a single parent, set the parent to that single parent else if (parents.size == 1) parents.head // If no parent, set to null (default if parent not provided) else null }: ClassLoader ) { self => private val logger = LoggerFactory.getLogger(this.getClass) /** * Creates a new multi class loader with no URLs of its own, although it may * still expose URLs from provided class loaders. 
* * @param classLoaders The class loaders to use as the underlying * implementations of this class loader */ def this(classLoaders: ClassLoader*) = { this(Nil, classLoaders) } override protected def findClass(name: String): Class[_] = { @inline def tryFindClass(classLoader: ClassLoader, name: String) = { Try(Class.forName(name, false, classLoader)) } // NOTE: Using iterator to evaluate elements one at a time classLoaders.toIterator .map(classLoader => tryFindClass(classLoader, name)) .find(_.isSuccess) .map(_.get) .getOrElse(throw new ClassNotFoundException(name)) } override protected def findResource(name: String): URL = { // NOTE: Using iterator to evaluate elements one at a time classLoaders.toIterator.map(cl => _findResource(cl, name)).find(_ != null) .getOrElse(super.findResource(name)) } override protected def findResources(name: String): util.Enumeration[URL] = { val internalResources = classLoaders .flatMap(cl => Try(_findResources(cl, name)).toOption) .map(_.asScala) .reduce(_ ++ _) ( internalResources ++ Try(super.findResources(name)).map(_.asScala).getOrElse(Nil) ).asJavaEnumeration } private def _findResource[T <: ClassLoader](classLoader: T, name: String) = { _getDeclaredMethod(classLoader.getClass, "findResource", classOf[String]) .invoke(classLoader, name).asInstanceOf[URL] } private def _findResources[T <: ClassLoader](classLoader: T, name: String) = { _getDeclaredMethod(classLoader.getClass, "findResources", classOf[String]) .invoke(classLoader, name).asInstanceOf[util.Enumeration[URL]] } private def _loadClass[T <: ClassLoader]( classLoader: T, name: String, resolve: Boolean ) = { _getDeclaredMethod(classLoader.getClass, "loadClass", classOf[String], classOf[Boolean] ).invoke(classLoader, name, resolve: java.lang.Boolean).asInstanceOf[Class[_]] } private def _getDeclaredMethod( klass: Class[_], name: String, classes: Class[_]* ): Method = { // Attempt to retrieve the method (public/protected/private) for the class, // trying the super class if the method is not available val potentialMethod = Try(klass.getDeclaredMethod(name, classes: _*)) .orElse(Try(_getDeclaredMethod(klass.getSuperclass, name, classes: _*))) // Allow access to protected/private methods potentialMethod.foreach(_.setAccessible(true)) potentialMethod match { case Success(method) => method case Failure(error) => throw error } } }
yeghishe/spark-kernel
kernel-api/src/main/scala/com/ibm/spark/utils/MultiClassLoader.scala
Scala
apache-2.0
4,895
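// A minimal sketch of the auxiliary varargs constructor above: combine two existing loaders and
// resolve a class through whichever delegate can find it. The looked-up class name is arbitrary;
// a ClassNotFoundException is thrown if no delegate can resolve it.
import com.ibm.spark.utils.MultiClassLoader

object MultiClassLoaderSketch {
  def main(args: Array[String]): Unit = {
    val combined = new MultiClassLoader(
      getClass.getClassLoader,
      Thread.currentThread.getContextClassLoader
    )
    val clazz = combined.loadClass("scala.Option") // resolvable from either delegate
    println(clazz.getName)
  }
}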
package crm package sdk package messages import scala.util.control.Exception._ import dispatch._, Defaults._ import java.util.Date; import cats._ import data._ import cats.syntax._ import org.log4s._ import better.files._ import fs2._ import com.lucidchart.open.xtract._ import com.lucidchart.open.xtract.{ XmlReader, __ } import com.lucidchart.open.xtract.XmlReader._ import com.lucidchart.open.xtract._ import play.api.libs.functional.syntax._ import scala.language.implicitConversions import soapnamespaces.implicits /** * General purpose XML writer typeclass. Namespaces are handled * via implicits so you can setup a set of namespace abbrevations * for your application. */ trait CrmXmlWriter[-A] { def write(a: A)(implicit ns: NamespaceLookup): xml.NodeSeq def transform(transformer: xml.NodeSeq => xml.NodeSeq)(implicit ns: NamespaceLookup) = CrmXmlWriter[A] { a => transformer(this.write(a)(ns)) } def transform(transformer: CrmXmlWriter[xml.NodeSeq])(implicit ns: NamespaceLookup) = CrmXmlWriter[A] { a => transformer.write(this.write(a)(ns))(ns) } } object CrmXmlWriter { /** * Get a writer without having to use the implicitly syntax. */ def of[A](implicit r: CrmXmlWriter[A], ns: NamespaceLookup): CrmXmlWriter[A] = r /** Create a write using apply syntax. */ def apply[A](f: A => xml.NodeSeq): CrmXmlWriter[A] = new CrmXmlWriter[A] { def write(a: A)(implicit ns: NamespaceLookup): xml.NodeSeq = f(a) } /** * Allows you to write `yourobject.write`. */ implicit class RichCrmXmlWriter[A](a: A) { def write(implicit writer: CrmXmlWriter[A], ns: NamespaceLookup) = writer.write(a)(ns) } }
aappddeevv/mscrm-soap-auth
src/main/scala/crm/sdk/messages/CrmXmlWriter.scala
Scala
apache-2.0
1,671
package net.liftmodules.fobobs4.snippet.FoBo import scala.xml.{NodeSeq} import net.liftweb.util._ import net.liftweb.common._ import net.liftweb.http._ import Helpers._ import net.liftweb.http.js._ import net.liftweb.http.js.JsCmds._ import net.liftmodules.fobobs4.lib.{BootstrapSH => sch} import net.liftweb.sitemap.{Loc, SiteMap} /** * ==Bs4Component's Snippet Bootstrap v4.x== * * This snippet class contains a collection of functions for common transform operations useful when working * with the Bootstrap toolkit components. * '''Example''' Invoke with * {{{ data-lift="FoBo.Bs4Comp.functionName?paramName=paramValue&...." }}} * For more examples see the individual transform functions. * @since v2.0 */ class Bs4Comp extends StatefulSnippet with Loggable { private lazy val sch = new sch() def dispatch = { case "breadCrumb" => breadCrumb case "popover" => popover case "popoverAppendJs" => popoverAppendJs case "popoverPreventDefault" => popoverPreventDefault case "popoverPreventDefaultAppendJs" => popoverPreventDefaultAppendJs case "tooltip" => tooltip case "tooltipAppendJs" => tooltipAppendJs case "activateDropdown" => activateDropdown case "activateDropdownAppendJs" => activateDropdownAppendJs } /** * This function creates a bootstrap breadCrumb component using Loc information from Lift. * * '''Snippet Params:''' * * - '''Param''' ''prefix'' - A optional param to prefix the breadcrumb list with if it's value can * be found as a Loc.name. * * '''Example''' * {{{ <script data-lift="FoBo.Bs4Comp.breadCrumb?prefix=Home"></script> }}} * * @since v2.0.1 */ def breadCrumb: CssSel = { val locName = S.attr("prefix") openOr "" val aPrefix: Box[Loc[_]] = SiteMap.findLoc(locName) var breadcrumbs: List[Loc[_]] = for { currentLoc <- S.location.toList loc <- currentLoc.breadCrumbs } yield loc breadcrumbs = aPrefix match { case Full(prefix) => prefix :: breadcrumbs case _ => breadcrumbs } " *" #> <nav aria-label="breadcrumb" role="navigation"> <ol class="breadcrumb"> {breadcrumbs.map { loc => val linkText = loc.linkText.openOr(NodeSeq.Empty) val link = loc.createDefaultLink.getOrElse(NodeSeq.Empty) if (loc == S.location.openOr(NodeSeq.Empty)) <li class="breadcrumb-item active" aria-current="page"> {linkText} </li> else <li class="breadcrumb-item"> <a href={link}> {linkText} </a> </li> }} </ol> </nav> } /** * This function sets a popover action on a element by inlining a script snippet on the page. * * '''Snippet Params:''' * * - '''Param''' ''id'' - The element id * - '''Param''' ''options'' - The option string see bootstrap documentation for available options. * * '''Example''' * {{{ <script data-lift="FoBo.Bs4Comp.popover?id=#theId&options=placement:'left'"></script> }}} * * '''Result:''' This example will result in the following being injected in place of the snippet invocation: * {{{ * <script type="text/javascript"> * // <![CDATA[ * $(function () { $('#theId').popover({placement:'left'}); }); ; * // ]]> * </script> * }}} * @since v2.0 */ def popover = { var id = S.attr("id") openOr "id: NOT DEFINED!?" var options = S.attr("options") openOr "" " *" #> JsCmds.Script(sch.popoverScript(id, options)) } /** * '''Lift 3 alternativ''' -- * This function sets a popover action on a element by appending a script snippet to lift's page-script on page load function. * * '''Snippet Params:''' * * - '''Param''' ''id'' - The element id * - '''Param''' ''options'' - The option string see bootstrap documentation for available options. 
* * '''Example''' * {{{ <script data-lift="FoBo.Bs4Comp.popoverAppendJs?id=#theId&options=placement:'left'"></script> }}} * * '''Result:''' This example will result in the following being appended to lift's page-script on page load function: * {{{ * $(function () { $('#theId').popover({placement:'left'}); }); ; * }}} * @since v2.0 */ def popoverAppendJs: CssSel = { var id = S.attr("id") openOr "id: NOT DEFINED!?" var options = S.attr("options") openOr "" S.appendJs(sch.popoverScript(id, options)) " *" #> "" } /** * This function inlines a tooltip action on a element in place of the snippet invocation. * @note If you are using Lift 3 you should use the AppendJs alternative. * * '''Snippet Params:''' * * - '''Param''' ''id'' - The element id * - '''Param''' ''options'' - The option string see bootstrap documentation for available options. * * '''Example''' Showing a example element using the tooltip and the actual snippet invocation * {{{ * <a id="aId" * title="The most powerful, most secure web framework available today. It simply rocks!" * href="http://liftweb.net/">Lift</a> * <script data-lift="FoBo.Bs4Comp.tooltip?id=#aId&options=placement:'bottom'"></script> * }}} * * '''Result:''' The script tag with the snippet invocation will result in the following being injected in place of the snippet invocation: * {{{ * <script type="text/javascript"> * // <![CDATA[ * $(function () { $('#aId').tooltip({placement:'bottom'}); }); ; * // ]]> * </script> * }}} * @see [[net.liftmodules.fobobs4.lib.BootstrapSH.tooltipScript]] * @since v2.0 */ def tooltip: CssSel = { var id = S.attr("id") openOr "id: NOT DEFINED!?" var options = S.attr("options") openOr "" " *" #> JsCmds.Script(sch.tooltipScript(id, options)) } /** * '''Lift 3 alternativ''' -- This function sets a tooltip action on a element by appending to lift's page-script on page load function. * * '''Snippet Params:''' * * - '''Param''' ''id'' - The element id * - '''Param''' ''options'' - The option string see bootstrap documentation for available options. * * '''Example''' Showing a example element using the tooltip and the actual snippet invocation * {{{ * <a id="aId" * title="The most powerful, most secure web framework available today. It simply rocks!" * href="http://liftweb.net/">Lift</a> * <script data-lift="FoBo.Bs4Comp.tooltip?id=#aId&options=placement:'bottom'"></script> * }}} * * '''Result:''' The script tag with the snippet invocation will result in the following being * injected in the lift page script : * {{{ * $(function () { $('#aId').tooltip({placement:'bottom'}); }); * }}} * @since v2.0 */ def tooltipAppendJs: CssSel = { var id = S.attr("id") openOr "id: NOT DEFINED!?" var options = S.attr("options") openOr "" S.appendJs(sch.tooltipScript(id, options)) " *" #> "" } /** * This function prevents the default action on a popover element. 
* * '''Snippet Params:''' * * - '''Param''' ''on'' - The element id or something more general like the default value a[rel=popover] * * '''Example''' * {{{ <script data-lift="FoBo.Bs4Comp.popoverPreventDefault?on=#theId"></script> }}} * * '''Result:''' This example will result in the following being injected in place of the snippet invocation: * {{{ * <script type="text/javascript"> * // <![CDATA[ * $(function(){$('#theId').popover().click(function(e){e.preventDefault()});});; * // ]]> * </script> * }}} * @since v2.0 */ def popoverPreventDefault = { var on = onTest(S.attr("on") openOr "a[rel=popover]") " *" #> JsCmds.Script(sch.popoverPreventDefaultScript(on)) // sch.popoverPreventDefault(on) } /** * '''Lift 3 alternativ''' -- This function prevents the default action on a popover element. * * '''Snippet Params:''' * * - '''Param''' ''on'' - The element id or something more general like the default value a[rel=popover] * * '''Example''' * {{{ <script data-lift="FoBo.Bs4Comp.popoverPreventDefault?on=#theId"></script> }}} * * '''Result:''' This example will result in the following being appended to lift's * page-script document ready function: * {{{ * $(function(){$('#theId').popover().click(function(e){e.preventDefault()});});; * }}} * @since v2.0 */ def popoverPreventDefaultAppendJs: CssSel = { var on = onTest(S.attr("on") openOr "a[rel=popover]") S.appendJs(sch.popoverPreventDefaultScript(on)) " *" #> "" } /** * This function loads the dropdown activation * @note If you are using Lift 3 you should use the AppendJs alternative. * * '''Snippet Param:''' * * - '''Param''' ''on'' - The element id or class to activate dropdown on * * '''Example''' * {{{ * <head> * : * <script data-lift="FoBo.Bs4Comp.activateDropdown?on=.dropdown-toggle"></script> * <script data-lift="FoBo.Bs4ScriptHelper.registerLoadEventFactory"></script> * </head> * }}} * The load event factory has to be registered ones before any activation can be loaded. * * '''Result:''' This example will result in the following being injected in place of the snippet invocation: * {{{ * <script type="text/javascript"> * // <![CDATA[ * addLoadEvent(function() { $('.dropdown-toggle').dropdown(); });; * // ]]> * </script> * <script type="text/javascript">//registerLoadEventFactory script ...</script> * }}} * @since v2.0 */ def activateDropdown = { var on = S.attr("on") openOr "on: ELEMENT CLASS or ID NOT DEFINED!?" " *" #> JsCmds.Script(sch.activateDropdownScript(on)) } /** * '''Lift 3 alternativ''' -- This function loads the dropdown activation * * '''Snippet Param:''' * * - '''Param''' ''on'' - The element id or class to activate dropdown on * * '''Example''' * {{{ * <head> * : * <script data-lift="FoBo.Bs4Comp.activateDropdownAppendJs?on=.dropdown-toggle"></script> * <script data-lift="FoBo.ScriptHelper.registerLoadEventFactoryAppendGlobalJs"></script> * </head> * }}} * The load event factory has to be registered ones before any activation can be loaded. * * '''Result:''' This example will result in the following being appended to lift's * page-script document ready function: * {{{ * addLoadEvent(function() { $('.dropdown-toggle').dropdown(); });; * }}} * @since v2.0 */ def activateDropdownAppendJs = { var on = S.attr("on") openOr "on: ELEMENT CLASS or ID NOT DEFINED!?" S.appendJs(sch.activateDropdownScript(on)) " *" #> "" } private def onTest(on: String): String = on match { case "arelpop" => "a[rel=popover]" case "a[rel" => "a[rel=popover]" case _ => on } }
karma4u101/FoBo
Bootstrap/Bootstrap4/TwBs-Bootstrap4-API/src/main/scala/net/liftmodules/fobobs4/snippet/FoBo/Bs4Comp.scala
Scala
apache-2.0
11,338
case class SomeType[A](a: A)
hmemcpy/milewski-ctfp-pdf
src/content/1.6/code/scala/snippet28.scala
Scala
gpl-3.0
28
package edu.gemini.ags.gems import edu.gemini.catalog.api.{MagnitudeConstraints, RadiusConstraint} import edu.gemini.spModel.core._ import edu.gemini.spModel.core.AngleSyntax._ import edu.gemini.spModel.gems.{GemsGuideProbeGroup, GemsGuideStarType} import edu.gemini.shared.util.immutable.ScalaConverters._ import edu.gemini.shared.util.immutable.{Option => GOption} import scala.collection.JavaConverters._ import scalaz._ import Scalaz._ /** * Used to query catalogs and filter and categorize query results. * See OT-20 */ case class CatalogSearchCriterion(name: String, radiusConstraint: RadiusConstraint, magConstraint: MagnitudeConstraints, offset: Option[Offset] = None, posAngle: Option[Angle] = None) { /** * If offset and pos angle are specified, then we want the coordinates of the * offset position when rotated for the position angle. * * @param base the base position * @return the adjusted base position (base + (offset position rotated by position angle)) */ def adjustedBase(base: Coordinates): Coordinates = { (offset |@| posAngle) { (off, a) => val pa = a.toRadians (pa != 0.0) option { val p = off.p.degrees val q = off.q.degrees val cosa = Math.cos(pa) val sina = Math.sin(pa) val ra = p * cosa + q * sina val dec = -p * sina + q * cosa val raAngle = base.ra.offset(Angle.fromDegrees(ra)) val decAngle = base.dec.offset(Angle.fromDegrees(dec)) Coordinates(raAngle, decAngle._1) } }.flatten | base } /** * If there is an offset but there isn't a posAngle, then we have to adjust the * search radius to take into account any position angle. That means the * outer limit increases by the distance from the base to the offset and the * inner limit decreases by the same distance (never less than 0 though). * * @return the (possibly ) adjusted radius limits */ def adjustedLimits: RadiusConstraint = if (offset.isDefined && posAngle.isEmpty) { radiusConstraint.adjust(offset.get) } else { radiusConstraint } /** * Sets the offset to a specific value. */ case class Matcher(adjBase: Coordinates, adjLimits: RadiusConstraint) { /** * @param obj the SiderealTarget to match * @return true if the object matches the magnitude and radius limits */ def matches(obj: SiderealTarget): Boolean = matches(obj.magnitudes) && matches(obj.coordinates) private def matches(coords: Coordinates): Boolean = { val distance = Coordinates.difference(adjBase, coords).distance val minRadius = adjLimits.minLimit val maxRadius = adjLimits.maxLimit distance >= minRadius && distance <= maxRadius } private def matches(magList: List[Magnitude]): Boolean = magList.exists(matches) private def matches(mag: Magnitude): Boolean = magConstraint.contains(mag) } /** * This can be used as a predicate to filter on a List[SiderealTarget]. 
* * @param base the base position * @return a new Matcher for the given base position */ def matcher(base: Coordinates): Matcher = Matcher(adjustedBase(base), adjustedLimits) } /** * See OT-24 */ case class GemsCatalogSearchCriterion(key: GemsCatalogSearchKey, criterion: CatalogSearchCriterion) /** * Results of a GeMS catalog search * See OT-24 */ case class GemsCatalogSearchResults(criterion: GemsCatalogSearchCriterion, results: List[SiderealTarget]) { // Constructors for Java def this(results: java.util.List[SiderealTarget], criterion: GemsCatalogSearchCriterion) = this(criterion, results.asScala.toList) def resultsAsJava: java.util.List[SiderealTarget] = new java.util.ArrayList[SiderealTarget](results.asJava) def targetAt(i: Int): GOption[SiderealTarget] = results.lift(i).asGeminiOpt } /** * Represents the GeMS catalog star options * See OT-24 */ case class GemsCatalogSearchKey(starType: GemsGuideStarType, group: GemsGuideProbeGroup)
spakzad/ocs
bundle/edu.gemini.ags/src/main/scala/edu/gemini/ags/gems/CatalogSearchCriterion.scala
Scala
bsd-3-clause
3,993
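// A standalone restatement of the rotation performed in CatalogSearchCriterion.adjustedBase
// above: a (p, q) offset is rotated by the position angle before being applied to the base
// position. Plain doubles are used instead of the Gemini Angle/Offset types; values are arbitrary.
object OffsetRotationSketch {
  def rotate(p: Double, q: Double, posAngleRadians: Double): (Double, Double) = {
    val cosa = math.cos(posAngleRadians)
    val sina = math.sin(posAngleRadians)
    val ra   = p * cosa + q * sina
    val dec  = -p * sina + q * cosa
    (ra, dec)
  }

  def main(args: Array[String]): Unit = {
    // a pure p offset rotated by 90 degrees moves (up to rounding) entirely into the dec term
    println(rotate(p = 10.0 / 3600, q = 0.0, posAngleRadians = math.Pi / 2))
  }
}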
/* * Copyright 2021 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package uk.gov.hmrc.ct.computations import uk.gov.hmrc.ct.box.{Calculated, CtBoxIdentifier, CtOptionalInteger} import uk.gov.hmrc.ct.computations.calculations.MachineryAndPlantCalculator import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever case class CP91(value: Option[Int]) extends CtBoxIdentifier(name = "Balancing charge") with CtOptionalInteger object CP91 extends Calculated[CP91, ComputationsBoxRetriever] with MachineryAndPlantCalculator { override def calculate(fieldValueRetriever: ComputationsBoxRetriever): CP91 = { computeBalancingCharge(cpq8 = fieldValueRetriever.cpQ8(), cp78 = fieldValueRetriever.cp78(), cp82 = fieldValueRetriever.cp82(), cp83 = fieldValueRetriever.cp83(), cp84 = fieldValueRetriever.cp84(), cp88 = fieldValueRetriever.cp88(), cp97 = fieldValueRetriever.cp97(), cp666 = fieldValueRetriever.cp666(), cp667 = fieldValueRetriever.cp667(), cp672 = fieldValueRetriever.cp672(), cp673 = fieldValueRetriever.cp673(), cp674 = fieldValueRetriever.cp674(), cpAux1 = fieldValueRetriever.cpAux1(), cpAux2 = fieldValueRetriever.cpAux2(), cpAux3 = fieldValueRetriever.cpAux3() ) } }
hmrc/ct-calculations
src/main/scala/uk/gov/hmrc/ct/computations/CP91.scala
Scala
apache-2.0
2,122
package com.lucidchart.threadflow.play import com.lucidchart.threadflow.FlowStore import _root_.play.api.mvc.RequestHeader class FlowFilter(store: FlowStore) extends ActionSideEffect[({type R[_] = RequestHeader})#R] { protected[this] def headerName = "X-Flow-Id" protected[this] def `do`[A](request: RequestHeader) = { store.set(request.headers.get(headerName).orNull) } }
lucidsoftware/thread-flow
thread-flow-play/src/main/scala/com/lucidchart/threadflow/play/FlowFilter.scala
Scala
apache-2.0
388
package com.fsist.safepickle import org.scalatest.FunSuite class TreePickleWriterTest extends FunSuite { def writer = WrapperBackend.writer() def testWriting(value: Wrapper): Unit = { val written = writer.write(value).result() assert(value == written, "Survived roundtrip") } test("Write primitive values") { testWriting(StringWrapper("foobar")) testWriting(IntWrapper(123)) testWriting(FloatWrapper(1.2f)) testWriting(DoubleWrapper(1.2d)) testWriting(LongWrapper(-123L)) testWriting(NullWrapper) } test("Write compound values") { testWriting( ArrayWrapper(Seq( StringWrapper("foo"), ArrayWrapper(Seq( IntWrapper(1), IntWrapper(2), StringWrapper("bar") )), ObjectWrapper(Map( "a" -> StringWrapper("a"), "b" -> ObjectWrapper(Map( "c" -> ArrayWrapper(Seq( FloatWrapper(1.23f) )) )) )) )) ) testWriting( ObjectWrapper(Map( "arr" -> ArrayWrapper(Seq( StringWrapper("foo"), ArrayWrapper(Seq( IntWrapper(1), IntWrapper(2), StringWrapper("bar") )), ObjectWrapper(Map( "a" -> StringWrapper("a"), "b" -> ObjectWrapper(Map( "c" -> ArrayWrapper(Seq( FloatWrapper(1.23f) )) )) )) )) )) ) } }
fsist/safepickle
src/test/scala/com/fsist/safepickle/TreePickleWriterTest.scala
Scala
apache-2.0
1,448
/** * Swaggy Jenkins * Jenkins API clients generated from Swagger / Open API specification * * The version of the OpenAPI document: 1.1.2-pre.0 * Contact: [email protected] * * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). * https://openapi-generator.tech * Do not edit the class manually. */ package org.openapitools.client.model case class Pipeline ( `class`: Option[String] = None, organization: Option[String] = None, name: Option[String] = None, displayName: Option[String] = None, fullName: Option[String] = None, weatherScore: Option[Integer] = None, estimatedDurationInMillis: Option[Integer] = None, latestRun: Option[PipelinelatestRun] = None )
cliffano/swaggy-jenkins
clients/scala-httpclient-deprecated/generated/src/main/scala/org/openapitools/client/model/Pipeline.scala
Scala
mit
729
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.hive.orc import org.apache.hadoop.fs.Path import org.apache.spark.deploy.SparkHadoopUtil import org.apache.spark.sql.sources.HadoopFsRelationTest import org.apache.spark.sql.types._ class OrcHadoopFsRelationSuite extends HadoopFsRelationTest { override val dataSourceName: String = classOf[DefaultSource].getCanonicalName import sqlContext._ import sqlContext.implicits._ test("save()/load() - partitioned table - simple queries - partition columns in data") { withTempDir { file => val basePath = new Path(file.getCanonicalPath) val fs = basePath.getFileSystem(SparkHadoopUtil.get.conf) val qualifiedBasePath = fs.makeQualified(basePath) for (p1 <- 1 to 2; p2 <- Seq("foo", "bar")) { val partitionDir = new Path(qualifiedBasePath, s"p1=$p1/p2=$p2") sparkContext .parallelize(for (i <- 1 to 3) yield (i, s"val_$i", p1)) .toDF("a", "b", "p1") .write .format("orc") .save(partitionDir.toString) } val dataSchemaWithPartition = StructType(dataSchema.fields :+ StructField("p1", IntegerType, nullable = true)) checkQueries( load( source = dataSourceName, options = Map( "path" -> file.getCanonicalPath, "dataSchema" -> dataSchemaWithPartition.json))) } } }
andrewor14/iolap
sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcHadoopFsRelationSuite.scala
Scala
apache-2.0
2,178
/* * Scala.js (https://www.scala-js.org/) * * Copyright EPFL. * * Licensed under Apache License 2.0 * (https://www.apache.org/licenses/LICENSE-2.0). * * See the NOTICE file distributed with this work for * additional information regarding copyright ownership. */ package org.scalajs.linker /** Kind of module structure emitted for the Scala.js output. */ abstract class ModuleKind private () object ModuleKind { /** All the available module kinds. * * They are listed in decreasing order of "importance", as judged by * whoever maintains the back-ends. */ val All: List[ModuleKind] = List( NoModule, ESModule, CommonJSModule) /** No module structure. * * With this module kind, exports are stored on the global object. * * Imports are not supported. */ case object NoModule extends ModuleKind /** An ECMAScript 2015 module. * * Scala.js imports and exports directly map to `import` and `export` * clauses in the ES module. */ case object ESModule extends ModuleKind /** A CommonJS module (notably used by Node.js). * * Imported modules are fetched with `require`. Exports go to the `exports` * module-global variable. */ case object CommonJSModule extends ModuleKind }
nicolasstucki/scala-js
linker/shared/src/main/scala/org/scalajs/linker/ModuleKind.scala
Scala
apache-2.0
1,280
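// A small consumer sketch for the closed hierarchy above (the constructor is private, so the
// three case objects are the only values). Descriptions are paraphrased from the scaladoc.
import org.scalajs.linker.ModuleKind

object ModuleKindSketch {
  def describe(kind: ModuleKind): String = kind match {
    case ModuleKind.NoModule       => "no module structure; exports land on the global object"
    case ModuleKind.ESModule       => "ES2015 module with import/export clauses"
    case ModuleKind.CommonJSModule => "CommonJS module using require and exports"
  }

  def main(args: Array[String]): Unit =
    ModuleKind.All.foreach(kind => println(s"$kind -> ${describe(kind)}"))
}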
package org.http4s package parser import cats.data.NonEmptyList import java.nio.charset.{Charset, StandardCharsets} import org.http4s._ import org.http4s.headers.Origin import org.http4s.internal.parboiled2._ trait OriginHeader { def ORIGIN(value: String): ParseResult[Origin] = new OriginParser(value).parse private class OriginParser(value: String) extends Http4sHeaderParser[Origin](value) with Rfc3986Parser { override def charset: Charset = StandardCharsets.ISO_8859_1 def entry: Rule1[Origin] = rule { nullEntry | hostListEntry } // The spec states that an Origin may be the string "null": // http://tools.ietf.org/html/rfc6454#section-7 // // However, this MDN article states that it may be the empty string: // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin // // Although the MDN article is possibly wrong, // it seems likely we could get either case, // so we read both as Origin.Null and re-serialize it as "null": def nullEntry: Rule1[Origin] = rule { (str("") ~ EOI | str("null") ~ EOI) ~> { () => Origin.Null } } def hostListEntry: Rule1[Origin] = rule { (host ~ zeroOrMore(" " ~ host)) ~> { (head: Origin.Host, tail: collection.Seq[Origin.Host]) => Origin.HostList(NonEmptyList(head, tail.toList)) } } def host: Rule1[Origin.Host] = rule { (scheme ~ "://" ~ Host ~ Port) ~> { (s, h, p) => Origin.Host(s, h, p) } } } }
aeons/http4s
core/src/main/scala/org/http4s/parser/OriginHeader.scala
Scala
apache-2.0
1,526
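// A hedged sketch of the three input shapes the parser above accepts. It assumes the OriginHeader
// trait is visible to the calling code (it lives in the internal org.http4s.parser package) and
// only prints the ParseResult values rather than asserting on them.
import org.http4s.parser.OriginHeader

object OriginParsingSketch extends OriginHeader {
  def main(args: Array[String]): Unit = {
    println(ORIGIN("null"))                     // expected: Origin.Null via the nullEntry rule
    println(ORIGIN(""))                         // the empty string is also read as Origin.Null
    println(ORIGIN("https://example.com:8080")) // expected: Origin.HostList with a single host
  }
}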
package org.photon.protocol

package object dofus {

  implicit class SerializablesExt[T <: StringSerializable](val c: Seq[T]) extends AnyVal {
    def serialize(out: StringBuilder, sep: String = "", start: String = "", end: String = "") {
      out ++= start
      var first: Boolean = true
      for (s <- c) {
        if (first) first = false else out ++= sep
        s.serialize(out)
      }
      out ++= end
    }
  }

  implicit class StringBuilderExt(val self: StringBuilder) extends AnyVal {
    def append(bytes: Array[Byte], start: Int = 0, end: Int = -1): self.type = {
      // copy bytes(start) .. bytes(end) as chars; end < 0 means "up to the last byte"
      var i = start
      val max = if (end < 0) bytes.length - 1 else end
      while (i <= max) {
        self.append(bytes(i).toChar)
        i += 1
      }
      self
    }
  }

  def hex(c: Int) = if (c < 0) "-1" else Integer.toString(c, 16)

  def btoi(b: Boolean) = if (b) "1" else "0"
  def itob(i: String) = i == "1"

  object Int {
    def unapply(s: String): Option[Int] = try {
      Some(s.toInt)
    } catch {
      case _: NumberFormatException => None
    }
  }

  object Long {
    def unapply(s: String): Option[Long] = try {
      Some(s.toLong)
    } catch {
      case _: NumberFormatException => None
    }
  }
}
Emudofus/Photon
protocol/main/src/org/photon/protocol/dofus/package.scala
Scala
mit
1,123
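// A usage sketch for the helpers in the package object above. The extractors are renamed on
// import so they do not shadow scala.Int/scala.Long inside the example; everything printed is
// derived directly from the definitions shown.
import org.photon.protocol.dofus.{Int => SafeInt, Long => SafeLong, btoi, hex, itob}

object DofusHelpersSketch {
  def main(args: Array[String]): Unit = {
    "42" match {
      case SafeInt(n) => println(s"parsed int $n") // parsed int 42
      case _          => println("not an int")
    }
    println(SafeLong.unapply("not-a-number")) // None
    println(hex(255))                         // ff
    println(itob(btoi(true)))                 // true
  }
}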
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.python import scala.collection.mutable import scala.collection.mutable.ArrayBuffer import org.apache.spark.api.python.PythonEvalType import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, Filter, LogicalPlan, Project} import org.apache.spark.sql.catalyst.rules.Rule /** * Extracts all the Python UDFs in logical aggregate, which depends on aggregate expression or * grouping key, or doesn't depend on any above expressions, evaluate them after aggregate. */ object ExtractPythonUDFFromAggregate extends Rule[LogicalPlan] { /** * Returns whether the expression could only be evaluated within aggregate. */ private def belongAggregate(e: Expression, agg: Aggregate): Boolean = { e.isInstanceOf[AggregateExpression] || PythonUDF.isGroupedAggPandasUDF(e) || agg.groupingExpressions.exists(_.semanticEquals(e)) } private def hasPythonUdfOverAggregate(expr: Expression, agg: Aggregate): Boolean = { expr.find { e => PythonUDF.isScalarPythonUDF(e) && (e.references.isEmpty || e.find(belongAggregate(_, agg)).isDefined) }.isDefined } private def extract(agg: Aggregate): LogicalPlan = { val projList = new ArrayBuffer[NamedExpression]() val aggExpr = new ArrayBuffer[NamedExpression]() agg.aggregateExpressions.foreach { expr => if (hasPythonUdfOverAggregate(expr, agg)) { // Python UDF can only be evaluated after aggregate val newE = expr transformDown { case e: Expression if belongAggregate(e, agg) => val alias = e match { case a: NamedExpression => a case o => Alias(e, "agg")() } aggExpr += alias alias.toAttribute } projList += newE.asInstanceOf[NamedExpression] } else { aggExpr += expr projList += expr.toAttribute } } // There is no Python UDF over aggregate expression Project(projList, agg.copy(aggregateExpressions = aggExpr)) } def apply(plan: LogicalPlan): LogicalPlan = plan transformUp { case agg: Aggregate if agg.aggregateExpressions.exists(hasPythonUdfOverAggregate(_, agg)) => extract(agg) } } /** * Extracts PythonUDFs from operators, rewriting the query plan so that the UDF can be evaluated * alone in a batch. * * Only extracts the PythonUDFs that could be evaluated in Python (the single child is PythonUDFs * or all the children could be evaluated in JVM). * * This has the limitation that the input to the Python UDF is not allowed include attributes from * multiple child operators. 
*/ object ExtractPythonUDFs extends Rule[LogicalPlan] with PredicateHelper { private type EvalType = Int private type EvalTypeChecker = EvalType => Boolean private def hasScalarPythonUDF(e: Expression): Boolean = { e.find(PythonUDF.isScalarPythonUDF).isDefined } private def canEvaluateInPython(e: PythonUDF): Boolean = { e.children match { // single PythonUDF child could be chained and evaluated in Python case Seq(u: PythonUDF) => e.evalType == u.evalType && canEvaluateInPython(u) // Python UDF can't be evaluated directly in JVM case children => !children.exists(hasScalarPythonUDF) } } private def collectEvaluableUDFsFromExpressions(expressions: Seq[Expression]): Seq[PythonUDF] = { // Eval type checker is set once when we find the first evaluable UDF and its value // shouldn't change later. // Used to check if subsequent UDFs are of the same type as the first UDF. (since we can only // extract UDFs of the same eval type) var evalTypeChecker: Option[EvalTypeChecker] = None def collectEvaluableUDFs(expr: Expression): Seq[PythonUDF] = expr match { case udf: PythonUDF if PythonUDF.isScalarPythonUDF(udf) && canEvaluateInPython(udf) && evalTypeChecker.isEmpty => evalTypeChecker = Some((otherEvalType: EvalType) => otherEvalType == udf.evalType) Seq(udf) case udf: PythonUDF if PythonUDF.isScalarPythonUDF(udf) && canEvaluateInPython(udf) && evalTypeChecker.get(udf.evalType) => Seq(udf) case e => e.children.flatMap(collectEvaluableUDFs) } expressions.flatMap(collectEvaluableUDFs) } def apply(plan: LogicalPlan): LogicalPlan = plan transformUp { case plan: LogicalPlan => extract(plan) } /** * Extract all the PythonUDFs from the current operator and evaluate them before the operator. */ private def extract(plan: LogicalPlan): LogicalPlan = { val udfs = collectEvaluableUDFsFromExpressions(plan.expressions) // ignore the PythonUDF that come from second/third aggregate, which is not used .filter(udf => udf.references.subsetOf(plan.inputSet)) if (udfs.isEmpty) { // If there aren't any, we are done. plan } else { val inputsForPlan = plan.references ++ plan.outputSet val prunedChildren = plan.children.map { child => val allNeededOutput = inputsForPlan.intersect(child.outputSet).toSeq if (allNeededOutput.length != child.output.length) { Project(allNeededOutput, child) } else { child } } val planWithNewChildren = plan.withNewChildren(prunedChildren) val attributeMap = mutable.HashMap[PythonUDF, Expression]() val splitFilter = trySplitFilter(planWithNewChildren) // Rewrite the child that has the input required for the UDF val newChildren = splitFilter.children.map { child => // Pick the UDF we are going to evaluate val validUdfs = udfs.filter { udf => // Check to make sure that the UDF can be evaluated with only the input of this child. 
udf.references.subsetOf(child.outputSet) } if (validUdfs.nonEmpty) { require( validUdfs.forall(PythonUDF.isScalarPythonUDF), "Can only extract scalar vectorized udf or sql batch udf") val resultAttrs = udfs.zipWithIndex.map { case (u, i) => AttributeReference(s"pythonUDF$i", u.dataType)() } val evaluation = validUdfs.partition( _.evalType == PythonEvalType.SQL_SCALAR_PANDAS_UDF ) match { case (vectorizedUdfs, plainUdfs) if plainUdfs.isEmpty => ArrowEvalPython(vectorizedUdfs, child.output ++ resultAttrs, child) case (vectorizedUdfs, plainUdfs) if vectorizedUdfs.isEmpty => BatchEvalPython(plainUdfs, child.output ++ resultAttrs, child) case _ => throw new AnalysisException( "Expected either Scalar Pandas UDFs or Batched UDFs but got both") } attributeMap ++= validUdfs.zip(resultAttrs) evaluation } else { child } } // Other cases are disallowed as they are ambiguous or would require a cartesian // product. udfs.filterNot(attributeMap.contains).foreach { udf => sys.error(s"Invalid PythonUDF $udf, requires attributes from more than one child.") } val rewritten = splitFilter.withNewChildren(newChildren).transformExpressions { case p: PythonUDF if attributeMap.contains(p) => attributeMap(p) } // extract remaining python UDFs recursively val newPlan = extract(rewritten) if (newPlan.output != plan.output) { // Trim away the new UDF value if it was only used for filtering or something. Project(plan.output, newPlan) } else { newPlan } } } // Split the original FilterExec to two FilterExecs. Only push down the first few predicates // that are all deterministic. private def trySplitFilter(plan: LogicalPlan): LogicalPlan = { plan match { case filter: Filter => val (candidates, nonDeterministic) = splitConjunctivePredicates(filter.condition).partition(_.deterministic) val (pushDown, rest) = candidates.partition(!hasScalarPythonUDF(_)) if (pushDown.nonEmpty) { val newChild = Filter(pushDown.reduceLeft(And), filter.child) Filter((rest ++ nonDeterministic).reduceLeft(And), newChild) } else { filter } case o => o } } }
michalsenkyr/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/python/ExtractPythonUDFs.scala
Scala
apache-2.0
9,246
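A toy illustration of the filter-splitting idea behind trySplitFilter in the rule above, using a plain predicate record instead of Catalyst expressions: deterministic, UDF-free conjuncts go to the lower filter that can be pushed below the Python evaluation, everything else stays above it. Pred and the sample predicate strings are hypothetical.

object SplitFilterDemo extends App {
  final case class Pred(sql: String, deterministic: Boolean, usesPythonUdf: Boolean)

  val conjuncts = Seq(
    Pred("a > 1",         deterministic = true,  usesPythonUdf = false),
    Pred("py_udf(b) = 0", deterministic = true,  usesPythonUdf = true),
    Pred("rand() < 0.5",  deterministic = false, usesPythonUdf = false)
  )

  // Mirrors the two partitions in trySplitFilter.
  val (candidates, nonDeterministic) = conjuncts.partition(_.deterministic)
  val (pushDown, rest)               = candidates.partition(p => !p.usesPythonUdf)

  println(s"lower filter (pushed down): ${pushDown.map(_.sql)}")
  println(s"upper filter: ${(rest ++ nonDeterministic).map(_.sql)}")
}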
package io.mpjsons.impl.util.reflection import java.lang.reflect.Field import io.mpjsons.impl.JsonInnerException import io.mpjsons.impl.util.TypesUtil import scala.reflect.runtime.universe._ case class FieldWithTypeInfo(field: Field, tpe: Type, nullable: Boolean) /** * ReflectionUtilNoCache wrapper that caches results of function calls. * @author Marcin Pieciukiewicz */ object ReflectionUtil { private var getAllAccessibleFieldsCache: Map[Type, Array[FieldWithTypeInfo]] = Map.empty private var getAccessibleFieldCache: Map[(Type, String), FieldWithTypeInfo] = Map.empty /** * Returns the array containing all Fields declared by given class or in its superclasses. * @param tpe class from which the fields should be retrieved * @return array containing all defined fields in class */ def getAllAccessibleFields(tpe: Type): Array[FieldWithTypeInfo] = { getAllAccessibleFieldsCache.getOrElse(tpe, { val allFields: Array[FieldWithTypeInfo] = ReflectionUtilNoCache.getAllAccessibleFields(tpe) getAllAccessibleFieldsCache += tpe -> allFields allFields }) } }
marpiec/mpjsons
src/main/scala/io/mpjsons/impl/util/reflection/ReflectionUtil.scala
Scala
apache-2.0
1,118
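ReflectionUtil above caches reflection results in a var holding an immutable Map. A standalone sketch of that memoization pattern on a trivial function, where string length stands in for the reflective lookup; like the original, it is not thread-safe.

object MemoDemo extends App {
  private var cache: Map[String, Int] = Map.empty

  def cachedLength(s: String): Int =
    cache.getOrElse(s, {
      val computed = { println(s"computing for $s"); s.length }  // pretend this is expensive
      cache += s -> computed
      computed
    })

  println(cachedLength("reflection")) // computes
  println(cachedLength("reflection")) // served from the cache
}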
/* Copyright 2011 Ben Biddington Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package org.coriander.oauth.core.cryptography.keys import java.io.{IOException, File} import java.security.interfaces.RSAPrivateKey import java.security.{KeyFactory, PrivateKey} import java.security.spec.PKCS8EncodedKeySpec import scala.io.Source._ import org.apache.commons.codec.binary.Base64._ object RsaPrivateKeyReader { def read(file : String): PrivateKey = { val allLines = fromFile(new File(file).getCanonicalPath).getLines.toList val START_TOKEN = "-----BEGIN PRIVATE KEY-----" val END_TOKEN = "-----END PRIVATE KEY-----" val isPKCS8Format = allLines.count((line : String) => line.trim.matches(START_TOKEN)) == 1 if (false == isPKCS8Format) throw new IOException("The supplied file does not appear to be in PKCS8 format.") val justTheKey = linesBetween(file, START_TOKEN, END_TOKEN) toPrivateKey(justTheKey.mkString) } private def linesBetween(file : String, start : String, end : String) = { val lines = fromFile(new File(file).getCanonicalPath).getLines.toList var done = false var started = false lines.takeWhile((line : String) => { if (false == started) { started = line.trim.matches(start) } done = line.trim.matches(end) started && !done }).slice(1, lines.size) } private def toPrivateKey(encodedKey : String) = { val privSpec = new PKCS8EncodedKeySpec(decodeBase64(encodedKey)) val keyFactory = KeyFactory.getInstance("RSA"); keyFactory.generatePrivate(privSpec).asInstanceOf[RSAPrivateKey] } }
ben-biddington/Coriander.OAuth
src/org/coriander/oauth/core/cryptography/keys/RsaPrivateKeyReader.scala
Scala
apache-2.0
2,089
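A standalone sketch of the "lines between PEM markers" step performed by linesBetween in the reader above, run on an in-memory list instead of a file. The base64 payload shown is fake and only illustrates what would be fed to PKCS8EncodedKeySpec after decoding.

object PemDemo extends App {
  val pemLines = List(
    "-----BEGIN PRIVATE KEY-----",
    "MIIBVAIBADANBgkqhkiG9w0BAQEFAASC",   // fake base64 payload
    "AT4wggE6AgEAAkEAxyz",
    "-----END PRIVATE KEY-----"
  )

  val body = pemLines
    .dropWhile(line => line.trim != "-----BEGIN PRIVATE KEY-----")
    .drop(1)
    .takeWhile(line => line.trim != "-----END PRIVATE KEY-----")

  println(body.mkString) // base64 text that the real reader decodes into a PKCS8EncodedKeySpec
}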
package io.sqooba.oss.timeseries.archive import io.sqooba.oss.timeseries.TimeSeries import io.sqooba.oss.timeseries.immutable.TSEntry import org.scalatest.{FlatSpec, Matchers} class GorillaBlockSpec extends FlatSpec with Matchers { val tsDouble = TimeSeries( Seq( TSEntry(1, 200.03, 100), TSEntry(50, 400.03, 100), TSEntry(77, 100.03, 100), TSEntry(200, 0.123456789, 100) ) ) val tsSampled = TimeSeries( Seq( TSEntry(1, 200.03, 100), TSEntry(101, 400.03, 100), TSEntry(201, 100.03, 100), TSEntry(301, 0.123456789, 100) ) ) val tsLong = TimeSeries( Seq( TSEntry(1, 1L, 100), TSEntry(50, Long.MaxValue, 100), TSEntry(77, Long.MinValue, 100), TSEntry(200, 123456789L, 100), TSEntry(1234123234L, 87767666566L, 100) ) ) "GorillaBlock" should "compress and again decompress a timeseries with tuples" in { GorillaBlock .compress(tsDouble.entries.toStream) .decompress shouldBe tsDouble.entries } it should "compress and again decompress for converted long values with tuples" in { GorillaBlock .compress(tsLong.map(_.toDouble).entries.toStream) .decompress shouldBe tsLong.entries } it should "compress and again decompress a sampled timeseries" in { GorillaBlock .compressSampled(tsSampled.entries.toStream, 100L) .decompress shouldBe tsSampled.entries } it should "throw if the byte arrays are not of same length" in { val TupleGorillaBlock(valueBytes, validityBytes) = GorillaBlock.compress(tsDouble.entries.toStream).asInstanceOf[TupleGorillaBlock] an[IllegalArgumentException] should be thrownBy ( GorillaBlock.fromTupleArrays(valueBytes.drop(8), validityBytes).decompress ) } it should "throw if a corrupt byte array is given for decompression of sampled" in { an[IllegalArgumentException] should be thrownBy ( GorillaBlock.fromSampled(Array(1, 2, 3, 4, 5), 10).decompress ) } it should "throw if an empty stream is provided to compress" in { an[IllegalStateException] should be thrownBy GorillaBlock.compress(Stream.empty) } it should "throw if an empty stream is provided to compress sampled" in { an[IllegalStateException] should be thrownBy GorillaBlock.compressSampled(Stream.empty, 10) } it should "throw if a non-positive sampling rate is given to compress" in { an[IllegalArgumentException] should be thrownBy GorillaBlock.compressSampled(tsSampled.entries.toStream, 0) an[IllegalArgumentException] should be thrownBy GorillaBlock.compressSampled(tsSampled.entries.toStream, -100) } it should "throw if empty byte arrays are provided to decompress" in { an[IllegalArgumentException] should be thrownBy ( GorillaBlock.fromTupleArrays(Array.empty[Byte], Array.empty[Byte]).decompress ) } it should "throw if empty byte arrays are provided to decompress sampled" in { an[IllegalArgumentException] should be thrownBy ( GorillaBlock.fromSampled(Array.empty[Byte], 100).decompress ) } it should "throw if a non-positive sampling rate is given to the constructor" in { an[IllegalArgumentException] should be thrownBy GorillaBlock.fromSampled(Array(1, 2, 3), -100) } it should "throw if the builder is called without any added entries" in { an[IllegalStateException] should be thrownBy { GorillaBlock.newBuilder.result() } } it should "serialize a tuple gorilla block" in { val tuple @ TupleGorillaBlock(valueBytes, validityBytes) = GorillaBlock.compress(tsDouble.entries.toStream).asInstanceOf[TupleGorillaBlock] tuple.serialize.length shouldBe Integer.BYTES + valueBytes.length + validityBytes.length } it should "serialize a sampled gorilla block" in { val sampled @ SampledGorillaBlock(valueBytes, _) = GorillaBlock 
.compressSampled(tsDouble.entries.toStream, 10) .asInstanceOf[SampledGorillaBlock] sampled.serialize shouldBe valueBytes } }
Shastick/tslib
src/test/scala/io/sqooba/oss/timeseries/archive/GorillaBlockSpec.scala
Scala
mit
4,043
package ml.combust.mleap import sbt.Keys._ import sbt._ object MleapProject { lazy val aggregatedProjects: Seq[ProjectReference] = Seq(baseProject, tensor, tensorflow, bundleMl, bundleHdfs, core, runtime, xgboostRuntime, xgboostSpark, avro, sparkBase, sparkTestkit, spark, sparkExtension, executor, executorTestKit, grpc, grpcServer, repositoryS3, springBootServing, serving, databricksRuntime) var rootSettings = Release.settings ++ Common.buildSettings ++ Common.sonatypeSettings ++ Seq(publishArtifact := false) lazy val root = Project( id = "mleap", base = file("."), aggregate = aggregatedProjects ).settings(rootSettings) lazy val baseProject = Project( id = "mleap-base", base = file("mleap-base") ) lazy val tensor = Project( id = "mleap-tensor", base = file("mleap-tensor"), dependencies = Seq(baseProject) ) lazy val bundleMl = Project( id = "bundle-ml", base = file("bundle-ml"), dependencies = Seq(baseProject, tensor) ) lazy val bundleHdfs = Project( id = "bundle-hdfs", base = file("bundle-hdfs"), dependencies = Seq(bundleMl) ) lazy val core = Project( id = "mleap-core", base = file("mleap-core"), dependencies = Seq(baseProject, tensor) ) lazy val runtime = Project( id = "mleap-runtime", base = file("mleap-runtime"), dependencies = Seq(core, bundleMl) ) lazy val avro = Project( id = "mleap-avro", base = file("mleap-avro"), dependencies = Seq(runtime) ) lazy val sparkBase = Project( id = "mleap-spark-base", base = file("mleap-spark-base"), dependencies = Seq(runtime, bundleHdfs) ) lazy val sparkTestkit = Project( id = "mleap-spark-testkit", base = file("mleap-spark-testkit"), dependencies = Seq(sparkBase) ) lazy val spark = Project( id = "mleap-spark", base = file("mleap-spark"), dependencies = Seq(sparkBase, sparkTestkit % "test") ) lazy val sparkExtension = Project( id = "mleap-spark-extension", base = file("mleap-spark-extension"), dependencies = Seq(spark, sparkTestkit % "test") ) lazy val tensorflow = Project( id = "mleap-tensorflow", base = file("mleap-tensorflow"), dependencies = Seq(runtime) ) lazy val xgboostRuntime = Project( id = "mleap-xgboost-runtime", base = file("mleap-xgboost-runtime"), dependencies = Seq( runtime, sparkTestkit % "test") ) lazy val xgboostSpark = Project( id = "mleap-xgboost-spark", base = file("mleap-xgboost-spark"), dependencies = Seq(sparkBase % "provided", xgboostRuntime % "test", spark % "test", sparkTestkit % "test") ) lazy val serving = Project( id = "mleap-serving", base = file("mleap-serving"), dependencies = Seq(springBootServing, grpcServer) ) lazy val executor = Project( id = "mleap-executor", base = file("mleap-executor"), dependencies = Seq(runtime) ) lazy val executorTestKit = Project( id = "mleap-executor-testkit", base = file("mleap-executor-testkit"), dependencies = Seq(executor) ) lazy val executorTests = Project( id = "mleap-executor-tests", base = file("mleap-executor-tests"), dependencies = Seq(executor, executorTestKit % "test") ) lazy val grpc = Project( id = "mleap-grpc", base = file("mleap-grpc"), dependencies = Seq(`executor`) ) lazy val grpcServer = Project( id = "mleap-grpc-server", base = file("mleap-grpc-server"), dependencies = Seq(grpc, executorTestKit % "test") ) lazy val repositoryS3 = Project( id = "mleap-repository-s3", base = file("mleap-repository-s3"), dependencies = Seq(executor) ) lazy val springBootServing = Project( id = "mleap-spring-boot", base = file("mleap-spring-boot"), dependencies = Seq(executor) ) lazy val benchmark = Project( id = "mleap-benchmark", base = file("mleap-benchmark"), dependencies = 
Seq(runtime, spark, avro) ) // Create underlying fat jar project as per: https://github.com/sbt/sbt-assembly#q-despite-the-concerned-friends-i-still-want-publish-fat-jars-what-advice-do-you-have lazy val databricksRuntimeFat = Project( id = "mleap-databricks-runtime-fat", base = file("mleap-databricks-runtime-fat"), dependencies = Seq(baseProject, tensor, core, runtime, bundleMl, spark, sparkExtension, tensorflow, xgboostRuntime, xgboostSpark) ).settings(excludeDependencies ++= Seq( SbtExclusionRule("org.tensorflow"), SbtExclusionRule("org.apache.spark"), SbtExclusionRule("ml.dmlc") )) lazy val databricksRuntime = Project( id = "mleap-databricks-runtime", base = file("mleap-databricks-runtime"), dependencies = Seq() ) lazy val databricksRuntimeTestkit = Project( id = "mleap-databricks-runtime-testkit", base = file("mleap-databricks-runtime-testkit"), dependencies = Seq(spark % "provided", sparkExtension % "provided", xgboostSpark % "provided", tensorflow % "provided") ) }
combust-ml/mleap
project/MleapProject.scala
Scala
apache-2.0
5,207
package com.github.tanacasino.nnp import org.scalatest._ class NNP10Spec extends FunSpec with Matchers with NNP10 { describe("Ninety-Nine Scala Problems") { // P01 (*) Find the last element of a list. describe("P01") { it("Find the last element of a list.") { last(List(1, 1, 2, 3, 5, 8)) should be (8) last1(List(1, 1, 2, 3, 5, 8)) should be (8) } } // P02 (*) Find the last but one element of a list. describe("P02") { it("Find the last but one element of a list.") { penultimate(List(1, 1, 2, 3, 5, 8)) should be (5) penultimate2(List(1, 1, 2, 3, 5, 8)) should be (5) } } // P03 (*) Find the Kth element of a list. describe("P03") { it("Find the Kth element of a list.") { nth(2, List(1, 1, 2, 3, 5, 8)) should be (2) } } // P04 (*) Find the number of elements of a list. describe("P04") { it("Find the number of elements of a list.") { length(List(1, 1, 2, 3, 5, 8)) should be (6) length2(List(1, 1, 2, 3, 5, 8)) should be (6) } } // P05 (*) Reverse a list. describe("P05") { it("Reverse a list") { reverse(List(1, 1, 2, 3, 5, 8)) should be (List(8, 5, 3, 2, 1, 1)) } } // P06 (*) Find out whether a list is a palindrome. describe("P06") { it("Find out whether a list is a palindrome.") { isPalindrome(List(1, 2, 3, 2, 1)) should be (right = true) } } // P07 (**) Flatten a nested list structure. describe("P07") { it("Flatten a nested list structure.") { val nested1 = List(List(1, 1), 2, List(3, List(5, 8))) flattenByCasino(nested1) should be (List(1, 1, 2, 3, 5, 8)) flattenByK(nested1) should be (List(1, 1, 2, 3, 5, 8)) flatten2(nested1) should be (List(1, 1, 2, 3, 5, 8)) flatten3(nested1) should be (List(1, 1, 2, 3, 5, 8)) val nested2 = List(List(1, 1), 2, List(3, List(5, 8)), List(15, List(11, 12), List(13, List(14, 15)))) flattenByCasino(nested2) should be (List(1, 1, 2, 3, 5, 8, 15, 11, 12, 13, 14, 15)) flatten2(nested2) should be (List(1, 1, 2, 3, 5, 8, 15, 11, 12, 13, 14, 15)) //flattenByK(nested2) should be (List(1, 1, 2, 3, 5, 8, 15, 11, 12, 13, 14, 15)) } } // P08 (**) Eliminate consecutive duplicates of list elements. // If a list contains repeated elements they should be replaced with a single copy of the element. // The order of the elements should not be changed. describe("P08") { it("Eliminate consecutive duplicates of list elements.") { compress(List('a, 'a, 'a, 'a, 'b, 'c, 'c, 'a, 'a, 'd, 'e, 'e, 'e, 'e)) should be (List('a, 'b, 'c, 'a, 'd, 'e)) compress2(List('a, 'a, 'a, 'a, 'b, 'c, 'c, 'a, 'a, 'd, 'e, 'e, 'e, 'e)) should be (List('a, 'b, 'c, 'a, 'd, 'e)) } } // P09 (**) Pack consecutive duplicates of list elements into sublists. // If a list contains repeated elements they should be placed in separate sublists. describe("P09") { it("Pack consecutive duplicates of list elements into sublists.") { pack(List('a, 'a, 'a, 'a, 'b, 'c, 'c, 'a, 'a, 'd, 'e, 'e, 'e, 'e)) should be (List(List('a, 'a, 'a, 'a), List('b), List('c, 'c), List('a, 'a), List('d), List('e, 'e, 'e, 'e))) pack2(List('a, 'a, 'a, 'a, 'b, 'c, 'c, 'a, 'a, 'd, 'e, 'e, 'e, 'e)) should be (List(List('a, 'a, 'a, 'a), List('b), List('c, 'c), List('a, 'a), List('d), List('e, 'e, 'e, 'e))) } } // P10 (*) Run-length encoding of a list. // Use the result of problem P09 to implement the so-called run-length encoding data compression method. // Consecutive duplicates of elements are encoded as tuples (N, E) where N is the number of duplicates of the element E. 
describe("P10") { it("Run-length encoding of a list.") { encode2(List('a, 'a, 'a, 'a, 'b, 'c, 'c, 'a, 'a, 'd, 'e, 'e, 'e, 'e)) should be (List((4,'a), (1,'b), (2,'c), (2,'a), (1,'d), (4,'e))) } } } }
tanacasino/learning-scala
src/test/scala/com/github/tanacasino/nnp/NNP10Spec.scala
Scala
mit
4,037
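The spec above exercises pack/encode but the NNP10 trait itself is not shown. For reference, here is one possible implementation of P09/P10 (not necessarily the one under test), using span and recursion on a plain character list.

object RunLengthDemo extends App {
  def pack[A](xs: List[A]): List[List[A]] = xs match {
    case Nil => Nil
    case head :: _ =>
      val (run, rest) = xs.span(_ == head)   // take the leading run of equal elements
      run :: pack(rest)
  }

  def encode[A](xs: List[A]): List[(Int, A)] =
    pack(xs).map(run => (run.length, run.head))

  println(encode("aaaabccaadeeee".toList))
  // List((4,a), (1,b), (2,c), (2,a), (1,d), (4,e))
}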
package filters import akka.stream.Materializer import javax.inject._ import models.Worker import play.api.Logger import play.api.libs.typedmap.{TypedKey, TypedMap} import play.api.mvc._ import services.WorkerStore import scala.concurrent.{ExecutionContext, Future} @Singleton class TokenAuthorizationFilter @Inject()( implicit override val mat: Materializer, exec: ExecutionContext) extends Filter { import TokenAuthorizationFilter._ private val logger = Logger(this.getClass) override def apply(nextFilter: RequestHeader => Future[Result]) (requestHeader: RequestHeader): Future[Result] = { // FIXME all paths should be authorized, not just worker routes if (requestHeader.path.startsWith("/api/v1/work")) { requestHeader.headers.get(AUTH_TOKEN_HEADER) match { case Some(token) => WorkerStore.workers.get(token) match { case Some(worker) => val attrs: TypedMap = requestHeader.attrs + (WORKER_ATTR -> worker) nextFilter(requestHeader.withAttrs(attrs)) case None => logger.info(s"Unrecognized API token $token") logger.debug(s"Workers ${WorkerStore.workers}") Future(Results.Unauthorized) } case None => logger.info("Missing API token") Future(Results.Unauthorized) } } else { nextFilter(requestHeader) } } } object TokenAuthorizationFilter { val AUTH_TOKEN_HEADER = "X-Auth-Token" val WORKER_ATTR = TypedKey[Worker]("worker") }
kuhnuri/kuhnuri-queue
app/filters/TokenAuthorizationFilter.scala
Scala
apache-2.0
1,636
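A plain-Scala sketch of the authorization decision made in the filter above, with the Play types stripped out: look the X-Auth-Token header up in the worker store and either hand back the worker or a rejection (which the real filter turns into a 401). The Worker shape and token value here are made up.

object AuthDemo extends App {
  final case class Worker(id: String)

  val workers: Map[String, Worker] = Map("secret-token" -> Worker("worker-1"))

  def authorize(headers: Map[String, String]): Either[String, Worker] =
    headers.get("X-Auth-Token") match {
      case Some(token) => workers.get(token).toRight(s"Unrecognized API token $token")
      case None        => Left("Missing API token")
    }

  println(authorize(Map("X-Auth-Token" -> "secret-token"))) // Right(Worker(worker-1))
  println(authorize(Map.empty))                             // Left(Missing API token)
}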
package com.tuplejump.calliope import org.scalatest.{BeforeAndAfterAll, FunSpec} import org.scalatest.matchers.{MustMatchers, ShouldMatchers} import java.nio.ByteBuffer import com.tuplejump.calliope.utils.RichByteBuffer import RichByteBuffer._ import org.apache.spark.SparkContext import Implicits._ import com.tuplejump.calliope.Types.{CQLRowMap, CQLRowKeyMap, ThriftRowMap, ThriftRowKey} /** * To run this test you need a Cassandra cluster up and running * and run the data-script.cli in it to create the data. * */ class ThriftCassandraRDDSpec extends FunSpec with BeforeAndAfterAll with ShouldMatchers with MustMatchers { val CASSANDRA_NODE_COUNT = 3 val CASSANDRA_NODE_LOCATIONS = List("127.0.0.1", "127.0.0.2", "127.0.0.3") val TEST_KEYSPACE = "casSparkTest" val TEST_INPUT_COLUMN_FAMILY = "Words" info("Describes the functionality provided by the Cassandra RDD") val sc = new SparkContext("local", "castest") describe("Thrift Cassandra RDD") { it("should be able to build and process RDD[K,V]") { val cas = CasBuilder.thrift.withColumnFamily(TEST_KEYSPACE, TEST_INPUT_COLUMN_FAMILY) val casrdd = sc.thriftCassandra[String, Map[String, String]](cas) //This is same as calling, //val casrdd = sc.cassandra[String, Map[String, String]](TEST_KEYSPACE, TEST_INPUT_COLUMN_FAMILY) val result = casrdd.collect().toMap val resultKeys = result.keys resultKeys must be(Set("3musk001", "thelostworld001", "3musk003", "3musk002", "thelostworld002")) } it("should be able to select certain columns from Cassandra to build RDD") { val cas = CasBuilder.thrift.withColumnFamily(TEST_KEYSPACE, TEST_INPUT_COLUMN_FAMILY).columns("book") val casrdd = sc.thriftCassandra[String, Map[String, String]](cas) //This is same as calling, //val casrdd = sc.cassandra[String, Map[String, String]](TEST_KEYSPACE, TEST_INPUT_COLUMN_FAMILY) val result = casrdd.take(1).head val resultValues = result._2.keySet resultValues.size must be(1) } } override def afterAll() { sc.stop() } } private object ThriftCRDDTransformers { import RichByteBuffer._ implicit def row2String(key: ThriftRowKey, row: ThriftRowMap): List[String] = { row.keys.toList } implicit def cql3Row2Mapss(keys: CQLRowKeyMap, values: CQLRowMap): (Map[String, String], Map[String, String]) = { (keys, values) } }
brenttheisen/calliope
src/test/scala/com/tuplejump/calliope/ThriftCassandraRDDSpec.scala
Scala
apache-2.0
2,425
package BIDMat class SparseMat[@specialized(Double,Float) T] (nr: Int, nc: Int, var nnz0:Int, var ir:Array[Int], val jc:Array[Int], val data:Array[T]) (implicit manifest:Manifest[T], numeric:Numeric[T]) extends Mat(nr, nc) { override def nnz = nnz0 /* * Bounds-checked matrix access */ def apply(r0:Int, c0:Int):T = { val off = Mat.oneBased val r = r0 - off val c = c0 - off if (r < 0 || r >= nrows || c < 0 || c >= ncols) { throw new IndexOutOfBoundsException("("+(r+off)+","+(c+off)+") vs ("+nrows+","+ncols+")"); } else { get_(r, c); } } /* * Internal (unchecked) accessor */ def get_(r:Int, c:Int):T = { val ioff = Mat.ioneBased var ix = 0 if (ir != null) { ix = Mat.ibinsearch(r+ioff, ir, jc(c)-ioff, jc(c+1)-ioff) } else { ix = r+ioff - jc(c) } if (ix >= 0) data(ix) else numeric.zero } def indexOf2(a:T):(Int, Int) = { val off = Mat.oneBased val ioff = Mat.ioneBased val i = data.indexOf(a) var j = 0 while (jc(j)-ioff <= i) j += 1 (ir(i) - ioff + off, j-1+off) } def indexOf(a:T):Int = { val off = Mat.oneBased val (i,j) = indexOf2(a) i + (j-off)*nrows } /* * Update a matrix value, m(r,c) = v */ def update(r0:Int, c0:Int, v:T):T = { val off = Mat.oneBased val r = r0 - off val c = c0 - off if (r < 0 || r >= nrows || c < 0 || c >= ncols) { throw new IndexOutOfBoundsException("("+(r+off)+","+(c+off)+") vs ("+nrows+","+ncols+")"); } else { set_(r, c, v); } v } /* * Internal (unchecked) setter */ def set_(r:Int, c:Int, v:T) = { val ioff = Mat.ioneBased var ix = 0 if (ir != null) { ix = Mat.ibinsearch(r+ioff, ir, jc(c)-ioff, jc(c+1)-ioff) } else { ix = r+ioff - jc(c) } if (ix >= 0) data(ix) = v else throw new RuntimeException("Can't set missing values") } def explicitInds = { if (ir == null) { val ioff = Mat.ioneBased ir = new Array[Int](nnz) var i = 0 while (i < ncols) { var j = 0 while (j + jc(i) < jc(i)+1) { ir(j+jc(i)-ioff) = j+ioff j += 1 } i += 1 } } } /* * Transpose */ def gt:SparseMat[T] = { explicitInds val ic = SparseMat.uncompressInds(jc, ncols, ir); SparseMat.sparseImpl[T](ic, if (Mat.ioneBased==1) SparseMat.decInds(ir) else ir, data, ncols, nrows, nnz) } def gcountnz(n:Int, omat:Mat):IMat = { val dir = if (n > 0) n else { if (nrows == 1) 2 else 1 } val out = IMat.newOrCheckIMat(if (dir == 1) 1 else nrows, if (dir == 2) 1 else ncols, omat, this.GUID, "gcount".##) if (dir == 1) { var i = 0 while (i < ncols) { out.data(i) = jc(i+1)-jc(i) i += 1 } } else { val ioff = Mat.ioneBased out.clear var i = 0 while (i < nnz) { out.data(ir(i)-ioff) += 1 i += 1 } } out } /* * Stack matrices vertically */ def vertcat(a:SparseMat[T]):SparseMat[T] = if (ncols != a.ncols) { throw new RuntimeException("ncols must match") } else { if (ir != null) a.explicitInds if (a.ir != null) explicitInds val out = if (ir != null) { SparseMat.newOrCheck(nrows+a.nrows, ncols, nnz+a.nnz, null, false, GUID, a.GUID, "on".hashCode) } else { SparseMat.newOrCheck(nrows+a.nrows, ncols, nnz+a.nnz, null, true, GUID, a.GUID, "on".hashCode) } val ioff = Mat.ioneBased var ip = 0 var i = 0 out.jc(0) = ioff while (i < ncols) { var j = jc(i)-ioff while (j < jc(i+1)-ioff) { if (out.ir != null) out.ir(ip) = ir(j) out.data(ip) = data(j) ip += 1 j += 1 } j = a.jc(i)-ioff while (j < a.jc(i+1)-ioff) { if (out.ir != null) out.ir(ip) = a.ir(j) + nrows out.data(ip) = a.data(j) ip += 1 j += 1 } out.jc(i+1) = ip+ioff i += 1 } out } /* * Stack matrices horizontally */ def horzcat(a:SparseMat[T]):SparseMat[T] = if (nrows != a.nrows) { throw new RuntimeException("nrows must match") } else { if (ir != null) a.explicitInds if (a.ir != null) explicitInds 
val out = if (ir != null) { SparseMat.newOrCheck(nrows, ncols+a.ncols, nnz+a.nnz, null, false, GUID, a.GUID, "on".hashCode) } else { SparseMat.newOrCheck(nrows, ncols+a.ncols, nnz+a.nnz, null, true, GUID, a.GUID, "on".hashCode) } var ip = 0 System.arraycopy(data, 0, out.data, 0, nnz) System.arraycopy(a.data, 0, out.data, nnz, a.nnz) if (out.ir != null) { System.arraycopy(ir, 0, out.ir, 0, nnz) System.arraycopy(a.ir, 0, out.ir, nnz, a.nnz) } System.arraycopy(jc, 0, out.jc, 0, ncols+1) for (i <- 1 to a.ncols) { out.jc(i+ncols) = a.jc(i) + nnz } out } /* * Find indices (single) for all non-zeros elements */ def gfind:IMat = { var out = IMat.newOrCheckIMat(nnz, 1, null, GUID, "gfind".hashCode) val ioff = Mat.ioneBased val off = Mat.oneBased var i = 0 while (i < ncols) { var j = jc(i)-ioff if (ir != null) { while (j < jc(i+1)-ioff) { out.data(j) = ir(j)-ioff+off + i*nrows j += 1 } } else { while (j < jc(i+1)-ioff) { out.data(j) = j-jc(i)+ioff+off + i*nrows j += 1 } } i += 1 } out } /* * Find indices (i,j) for non-zero elements */ def gfind2:(IMat, IMat) = { var iout = IMat.newOrCheckIMat(nnz, 1, null, GUID, "gfind2_1".hashCode) var jout = IMat.newOrCheckIMat(nnz, 1, null, GUID, "gfind2_2".hashCode) val ioff = Mat.ioneBased val off = Mat.oneBased var i = 0 while (i < ncols) { var j = jc(i)-ioff if (ir != null) { while (j < jc(i+1)-ioff) { iout.data(j) = ir(j)-ioff+off j += 1 } } else { while (j < jc(i+1)-ioff) { iout.data(j) = j-jc(i)+ioff+off j += 1 } } i += 1 } if (off == 0) { System.arraycopy(SparseMat.uncompressInds(jc, ncols, ir), 0, jout.data, 0, nnz) } else { SparseMat.incInds(SparseMat.uncompressInds(jc, ncols, ir), jout.data) } (iout, jout) } /* * Find indices and values (i,j,v) for non-zero elements */ def gfind3:(IMat, IMat, DenseMat[T]) = { val vout = DenseMat.newOrCheck(nnz, 1, null, GUID, "gfind3_3".hashCode) val (iout, jout) = gfind2 System.arraycopy(data, 0, vout.data, 0, nnz) (iout, jout, vout) } /* * Implement a(im) = b where im is a matrix of indices to a and im and b are same-sized */ def update(im:IMat, b:SparseMat[T]) = { } /* * Implement slicing, a(iv,jv) where iv and jv are vectors, using ? 
as wildcard */ def gapply(iv:IMat, jv:IMat):SparseMat[T] = { val colinds = DenseMat.getInds(jv, ncols) val colsize = jv match {case dmy:MatrixWildcard => ncols; case _ => jv.length} iv match { case dummy:MatrixWildcard => { val ioff = Mat.ioneBased val off = Mat.oneBased var tnnz = 0 for (i <- 0 until colsize) tnnz += jc(colinds(i)-off+1) - jc(colinds(i)-off) val out = if (ir != null) { SparseMat.newOrCheck(nrows, colsize, tnnz, null, false, GUID, iv.GUID, jv.GUID, "gapply3".hashCode) } else { SparseMat.newOrCheck(nrows, colsize, tnnz, null, true, GUID, iv.GUID, jv.GUID, "gapply3".hashCode) } var inext = 0 var i = 0 out.jc(0) = ioff while (i < out.ncols) { val istep = jc(colinds(i)-off+1) - jc(colinds(i)-off) if (ir != null) System.arraycopy(ir, jc(colinds(i)-off)-ioff, out.ir, inext, istep) System.arraycopy(data, jc(colinds(i)-off)-ioff, out.data, inext, istep) inext += istep out.jc(i+1) = inext+ioff i += 1 } out } case _ => { explicitInds val off = Mat.oneBased val ioff = Mat.ioneBased val smat = SparseMat.newOrCheck(iv.length, nrows, iv.length, null, false, GUID, iv.GUID, jv.GUID, "gapply_x".hashCode) val im = IMat.newOrCheckIMat(iv.length, 1, null, GUID, iv.GUID, jv.GUID, "gapply_i".hashCode) var i = 0; while (i < iv.length) { smat.ir(i) = i+ioff im.data(i) = iv.data(i) i+=1 } Mat.ilexsort2(im.data, smat.ir) SparseMat.compressInds(im.data, nrows, smat.jc, iv.length) val colinds = DenseMat.getInds(jv, ncols) var tnnz = 0 i = 0 while (i < colsize) { var j = jc(colinds(i)-off)-ioff while (j < jc(colinds(i)-off+1)-ioff) { tnnz += smat.jc(ir(j)+1-ioff) - smat.jc(ir(j)-ioff) j += 1 } i += 1 } val out = SparseMat.newOrCheck(iv.length, colsize, tnnz, null, false, GUID, iv.GUID, jv.GUID, "gapply_y".hashCode) tnnz = 0 i = 0 out.jc(0) = ioff while (i < colsize) { var j = jc(colinds(i)-off)-ioff while (j < jc(colinds(i)-off+1)-ioff) { val dval = data(j) var k = smat.jc(ir(j)-ioff) - ioff while (k < smat.jc(ir(j)+1-ioff)-ioff) { out.ir(tnnz) = smat.ir(k) out.data(tnnz) = dval tnnz += 1 k += 1 } j += 1 } out.jc(i+1) = tnnz+ioff i += 1 } out } } } def gapply(iv:Int, jv:IMat):SparseMat[T] = gapply(IMat.ielem(iv), jv) def gapply(iv:IMat, jv:Int):SparseMat[T] = gapply(iv, IMat.ielem(jv)) def gcolslice(a:Int, b:Int, omat:Mat):SparseMat[T] = { val off = Mat.oneBased val ioff = Mat.ioneBased val newnnz = jc(b-off) - jc(a-off) val out = SparseMat.newOrCheck[T](nrows, b-a, newnnz, omat, false, GUID, "gcolslice".##) if (a-off < 0) throw new RuntimeException("colslice index out of range %d" format (a)) if (b-off > ncols) throw new RuntimeException("colslice index out of range %d %d" format (b-a, ncols)) val istart = jc(a-off)-ioff val iend = jc(b-off)-ioff System.arraycopy(ir, istart, out.ir, 0, iend-istart) System.arraycopy(data, istart, out.data, 0, iend-istart) var i = 0 while (i <= b-a) { out.jc(i) = jc(i+a) - jc(a) + ioff i += 1 } var j = i while (j <= omat.ncols) { out.jc(j) = out.jc(i-1) j += 1 } out.nnz0 = out.jc(i-1) - ioff // println("gcolslice2 %d %d" format (GUID, omat.GUID)) out } private def printOne(a:T):String = a match { case v:Double => { if (v % 1 == 0 && math.abs(v) < 1e10) { "%d" format v.intValue } else { "%.5g" format v } } case v:Float => { if (v % 1 == 0 && math.abs(v) < 1e5) { "%d" format v.intValue } else { "%.5g" format v } } case _ => "" } override def printOne(v0:Int):String = { val v = v0 + Mat.oneBased "%d" format v } override def toString:String = { val ioff = Mat.ioneBased val maxRows = 8 var fieldWidth = 4 val sb:StringBuilder = new StringBuilder val somespaces = " " var innz 
= 0 var icol = 0 while (innz < math.min(nnz, maxRows)) { while (innz >= jc(icol+1)-ioff) icol += 1 fieldWidth = math.max(fieldWidth, if (ir != null) 2+printOne(ir(innz)).length else 2+printOne(jc(icol+1)-jc(icol)).length) fieldWidth = math.max(fieldWidth, 2+printOne(icol).length) fieldWidth = math.max(fieldWidth, 2+printOne(data(innz)).length) innz += 1 } innz = 0; var innz0 = 0; icol = 0; while (innz < math.min(nnz, maxRows)) { while (innz >= jc(icol+1)-ioff) {icol += 1; innz0 = innz} var str = if (ir != null) printOne(ir(innz)-ioff) else printOne(innz-innz0); sb.append("("+somespaces.substring(0,fieldWidth-str.length)+str); str = printOne(icol); sb.append(","+somespaces.substring(0,fieldWidth-str.length)+str); str = printOne(data(innz)); sb.append(")"+somespaces.substring(0,fieldWidth-str.length)+str+"\\n"); innz += 1 } if (nnz > maxRows) { for (j <- 0 until 3) { sb.append(somespaces.substring(0, fieldWidth-2)+"...") } sb.append("\\n") } sb.toString() } def gsMult(a:SparseMat[T]):DenseMat[T] = if (ncols != a.nrows) throw new RuntimeException("dims mismatch") else { explicitInds a.explicitInds var myflops = 0L val out = DenseMat.newOrCheck(nrows, a.ncols, null, GUID, a.GUID, "sMult".hashCode) val ioff = Mat.ioneBased var i = 0 while (i < a.ncols) { val i0 = nrows*i var j = a.jc(i)-ioff while (j < a.jc(i+1)-ioff) { val ind = a.ir(j)-ioff val tval = a.data(j) var k = jc(ind)-ioff myflops += 2*(jc(ind+1)-ioff - k) while (k < jc(ind+1)-ioff) { val indx = ir(k)-ioff + i0 data(indx) = numeric.plus(data(indx), numeric.times(tval, data(k))) k += 1 } j += 1 } i += 1 } Mat.nflops += myflops out } def sgMatOp(b:SparseMat[T], op2:(T,T) => T, omat:Mat):SparseMat[T] = { Mat.nflops += nnz + b.nnz if (nrows==b.nrows && ncols==b.ncols) { if (ir != null) b.explicitInds if (b.ir != null) explicitInds if (ir == null) { sgMatOpNR(b,op2,omat) } else { val out = SparseMat.newOrCheck(nrows, ncols, nnz+b.nnz, omat, false, GUID, b.GUID, op2.hashCode) val ioff = Mat.ioneBased var nzc = 0 out.jc(0) = ioff var i = 0 while (i < ncols) { var ia = jc(i)-ioff var ib = b.jc(i)-ioff while (ia < jc(i+1)-ioff && ib < b.jc(i+1)-ioff) { if (ir(ia) < b.ir(ib)) { out.ir(nzc) = ir(ia) out.data(nzc) = op2(data(ia), numeric.zero) ia += 1 } else if (ir(ia) > b.ir(ib)) { out.ir(nzc) = b.ir(ib) out.data(nzc) = op2(numeric.zero, b.data(ib)) ib += 1 } else { out.ir(nzc) = ir(ia) out.data(nzc) = op2(data(ia), b.data(ib)) ia += 1 ib += 1 } nzc += 1 } while (ia < jc(i+1)-ioff) { out.ir(nzc) = ir(ia) out.data(nzc) = op2(data(ia), numeric.zero) ia += 1 nzc += 1 } while (ib < b.jc(i+1)-ioff) { out.ir(nzc) = b.ir(ib) out.data(nzc) = op2(numeric.zero, b.data(ib)) ib += 1 nzc += 1 } out.jc(i+1) = nzc+ioff i += 1 } out.sparseTrim } } else { throw new RuntimeException("dimensions mismatch") } } def sgMatOpD(b:DenseMat[T], op2:(T,T) => T, omat:Mat):SparseMat[T] = if (b.nrows > 1 && b.ncols > 1) { throw new RuntimeException("Sorry only edge operators supported for sparsemat op densemat") } else if ((b.nrows > 1 && b.nrows != nrows) || (b.ncols > 1 && b.ncols != ncols)) { throw new RuntimeException("Dimensions mismatch") } else { if (ir == null) explicitInds val out = SparseMat.newOrCheck[T](nrows, ncols, nnz, omat, false, GUID, b.GUID, op2.hashCode) val ioff = Mat.ioneBased var i = 0 while (i < ncols) { out.jc(i) = jc(i) var ia = jc(i)-ioff if (b.nrows == 1 && b.ncols == 1) { while (ia < jc(i+1)-ioff) { out.ir(ia) = ir(ia) out.data(ia) = op2(data(ia), b.data(0)) ia += 1 } } else if (b.nrows == 1) { while (ia < jc(i+1)-ioff) { out.ir(ia) = ir(ia) 
out.data(ia) = op2(data(ia), b.data(i)) ia += 1 } } else if (b.ncols == 1) { while (ia < jc(i+1)-ioff) { out.ir(ia) = ir(ia) out.data(ia) = op2(data(ia), b.data(ir(ia)-ioff)) ia += 1 } } i += 1 } out.jc(i) = jc(i) out.sparseTrim } def sgMatOpNR(b:SparseMat[T], op2:(T,T) => T, omat:Mat):SparseMat[T] = { val out = SparseMat.newOrCheck(nrows, ncols, nnz+b.nnz, omat, true, GUID, b.GUID, op2.hashCode) val ioff = Mat.ioneBased var nzc = 0 out.jc(0) = ioff for (i <- 0 until ncols) { var ia = jc(i)-ioff var ib = b.jc(i)-ioff while (ia < jc(i+1)-ioff && ib < b.jc(i+1)-ioff) { out.data(nzc) = op2(data(ia), b.data(ib)) ia += 1 ib += 1 nzc += 1 } while (ia < jc(i+1)-ioff) { out.data(nzc) = op2(data(ia), numeric.zero) ia += 1 nzc += 1 } while (ib < b.jc(i+1)-ioff) { out.data(nzc) = op2(numeric.zero, b.data(ib)) ib += 1 nzc += 1 } out.jc(i+1) = nzc+ioff } out.sparseTrim } def sgReduceOp(dim0:Int, op1:(T) => T, op2:(T,T) => T, omat:Mat):DenseMat[T] = { Mat.nflops += nnz var dim = if (nrows == 1 && dim0 == 0) 2 else math.max(1, dim0) val ioff = Mat.ioneBased if ((dim0 == 0) && (nrows == 1 || ncols == 1)) { // Sparse vector case val out = DenseMat.newOrCheck(1, 1, omat) var j = 0 var acc = op1(numeric.zero) while (j < nnz) { acc = op2(acc, data(j)) j += 1 } out.data(0) = acc out } else if (dim == 1) { val out = DenseMat.newOrCheck(1, ncols, omat, GUID, 1, op2.hashCode) var i = 0 while (i < ncols) { var acc = op1(numeric.zero) var j = jc(i)-ioff while (j < jc(i+1)-ioff) { acc = op2(acc, data(j)) j += 1 } out.data(i) = acc i += 1 } out } else if (dim == 2) { val out = DenseMat.newOrCheck(nrows, 1, omat, GUID, 2, op2.hashCode) out.clear if (ir != null) { var j = 0 while (j < nnz) { out.data(ir(j)-ioff) = op2(out.data(ir(j)-ioff), data(j)) j += 1 } } else { var i = 0 while (i < ncols) { var j = jc(i) while (j < jc(i+1)) { out.data(j-jc(i)) = op2(out.data(j-jc(i)), data(j-ioff)) j += 1 } i += 1 } } out } else throw new RuntimeException("index must 1 or 2") } def ssMatOpOne(b:DenseMat[T], op2:(T,T) => T, omat:Mat):SparseMat[T] = if (b.nrows == 1 && b.ncols == 1) { sgMatOpScalar(b.data(0), op2, omat) } else throw new RuntimeException("dims incompatible") def sgMatOpScalar(b:T, op2:(T,T) => T, outmat:Mat):SparseMat[T] = { val out = SparseMat.newOrCheck(nrows, ncols, nnz, outmat, (ir == null), GUID, op2.hashCode) var i = 0 out.jc(0) = jc(0) while (i < nnz) { out.data(i) = op2(data(i), b) if (ir != null) out.ir(i) = ir(i) i += 1 } i = 0 while (i <= ncols) { out.jc(i) = jc(i) i += 1 } out.sparseTrim } def sparseTrim:SparseMat[T] = { val ioff = Mat.ioneBased var i = 0 var nzc = 0 while (i < ncols) { var j = jc(i) while (j < jc(i+1)) { if (numeric.signum(data(j-ioff)) != 0) nzc += 1 j += 1 } i += 1 } if (nzc == nnz) { this } else { var out = this nzc = 0 var lastjc = 0 var i = 0 out.jc(0) = ioff while (i < ncols) { var j = lastjc while (j < jc(i+1)-ioff) { if (numeric.signum(data(j)) != 0) { out.data(nzc) = data(j) if (ir != null) out.ir(nzc) = ir(j) nzc += 1 } j += 1 } lastjc = jc(i+1)-ioff out.jc(i+1) = nzc+ioff i += 1 } nnz0 = nzc out } } def check = { val ioff = Mat.ioneBased var i = 0 if (jc(0) != ioff) { throw new RuntimeException("jc(0) should be "+ioff) } while (i < ncols) { var j = jc(i)-ioff if (jc(i) > jc(i+1)) { throw new RuntimeException("jc(i) out of order " + i + " " + jc(i) + " " + jc(i+1)) } if (ir != null) { while (j < jc(i+1)-ioff-1) { if (ir(j+1) <= ir(j)) { throw new RuntimeException("ir(j) out of order "+j+" "+ir(j)+" "+ir(j+1)) } if (ir(j) < ioff) { throw new RuntimeException("ir("+j+")="+ir(j)+" 
too small") } if (ir(j+1) >= nrows+ioff) { throw new RuntimeException("ir("+(j+1)+")="+ir(j+1)+" out of range "+(nrows+ioff)) } j += 1 } } i += 1 } if (jc(ncols) != nnz+ioff) { throw new RuntimeException("jc(ncols) should be "+nnz) } } def full():DenseMat[T] = full(null) def full(mat:Mat):DenseMat[T] = { val out = DenseMat.newOrCheck(nrows, ncols, mat, GUID, "full".hashCode) out.clear val ioff = Mat.ioneBased if (ir != null) { val cols = SparseMat.uncompressInds(jc, ncols, ir) var i = 0 while (i < nnz) { out.data(ir(i)-ioff + nrows*cols(i)) = data(i) i += 1 } } else { var i = 0 while (i < ncols) { var j = jc(i)-ioff while (j < jc(i+1)-ioff) { out.data(j-jc(i)+ioff + nrows*i) = data(j) j += 1 } i += 1 } } out } override def recycle(nr:Int, nc:Int, nnz:Int):SparseMat[T] = { val jc0 = if (jc.size >= nc+1) jc else new Array[Int](nc+1) val ir0 = if (ir.size >= nnz) ir else { if (Mat.useCache) { new Array[Int]((Mat.recycleGrow*nnz).toInt) } else { new Array[Int](nnz) } } val data0 = if (data.size >= nnz) data else { if (Mat.useCache) { new Array[T]((Mat.recycleGrow*nnz).toInt) } else { new Array[T](nnz) } } new SparseMat[T](nr, nc, nnz, ir0, jc0, data0) } } object SparseMat { def apply[T](nr:Int, nc:Int, nnz0:Int) (implicit manifest:Manifest[T], numeric:Numeric[T]):SparseMat[T] = { if (Mat.debugMem) { println("SparseMat %d %d %d" format (nr, nc, nnz0)); if (nnz0 > Mat.debugMemThreshold) throw new RuntimeException("SparseMat alloc too large"); } new SparseMat[T](nr, nc, nnz0, new Array[Int](nnz0), new Array[Int](nc+1), new Array[T](nnz0)); } def noRows[T](nr:Int, nc:Int, nnz0:Int) (implicit manifest:Manifest[T], numeric:Numeric[T]):SparseMat[T] = { if (Mat.debugMem) { println("SparseMat %d %d %d" format (nr, nc, nnz0)); if (nnz0 > Mat.debugMemThreshold) throw new RuntimeException("SparseMat alloc too large"); } new SparseMat[T](nr, nc, nnz0, null, new Array[Int](nc+1), new Array[T](nnz0)); } def remdups[@specialized(Double, Float) T](rows:Array[Int], cols:Array[Int], avals:Array[T]) (implicit manifest:Manifest[T], numeric:Numeric[T]):Int = { var i = 0 var j = 0 while (i < cols.length) { if (i == 0 || rows(i) != rows(i-1) || cols(i) != cols(i-1)) { cols(j) = cols(i) rows(j) = rows(i) avals(j) = avals(i) j += 1 } else { avals(j-1) = numeric.plus(avals(j-1), avals(i)) } i += 1 } j } def sparseImpl[@specialized(Double, Float) T](rows:Array[Int], cols:Array[Int], vals:Array[T], nrows:Int, ncols:Int, nnz:Int) (implicit manifest:Manifest[T], numeric:Numeric[T]):SparseMat[T] = { val ioff = Mat.ioneBased val out = if (rows != null) SparseMat[T](nrows, ncols, nnz) else noRows[T](nrows, ncols, nnz); val orows = out.ir; val ocols = new Array[Int](nnz); var i = 0; while (i < nnz) { ocols(i) = cols(i); i += 1; } val igood = if (orows != null) { i = 0; while (i < nnz) { orows(i) = rows(i) + ioff; i += 1; } val isort = BIDMat.Mat.ilexsort2(ocols, orows); i = 0; while (i < orows.length) {out.data(i) = vals(isort(i)); i+=1}; remdups(orows, ocols, out.data); } else { i = 0; while (i < vals.length) {out.data(i) = vals(i); i+=1}; nnz; } SparseMat.compressInds(ocols, ncols, out.jc, igood); out.sparseTrim } def compressInds(coli:Array[Int], ncols:Int, out:Array[Int], nnz0:Int):Array[Int] = { val ioff = Mat.ioneBased out(0) = ioff var j = 0 var i = 0 while (i < ncols) { while (j < nnz0 && coli(j) <= i) j+= 1 out(i+1) = j+ioff i += 1 } out } def uncompressInds(coli:Array[Int], ncols:Int, rowi:Array[Int], outx:Array[Int]):Array[Int] = { val ioff = Mat.ioneBased val out = if (outx != null) outx else new 
Array[Int](coli(ncols)-ioff) var i = 0 while (i < ncols) { var j = coli(i)-ioff while (j < coli(i+1)-ioff) { out(j) = i j+= 1 } i += 1 } out } def uncompressInds(coli:Array[Int], ncols:Int, rowi:Array[Int]):Array[Int] = uncompressInds(coli, ncols, rowi, null) def incInds(inds:Array[Int], out:Array[Int]):Array[Int] = { var i = 0 while (i < inds.length) { out(i) = inds(i) + 1 i += 1 } out } def incInds(inds:Array[Int]):Array[Int] = { val out = new Array[Int](inds.length) incInds(inds, out) } def decInds(inds:Array[Int]):Array[Int] = { val out = new Array[Int](inds.length) var i = 0 while (i < inds.length) { out(i) = inds(i) - 1 i += 1 } out } def newOrCheck[T](nr:Int, nc:Int, nnz:Int, oldmat:Mat, norows:Boolean = false) (implicit manifest:Manifest[T], numeric:Numeric[T]):SparseMat[T] = { if (oldmat.asInstanceOf[AnyRef] == null || (oldmat.nrows == 0 && oldmat.ncols == 0)) { if (Mat.useCache) { val m = if (norows) { SparseMat.noRows(nr, nc, (Mat.recycleGrow*nnz).toInt) } else { SparseMat(nr, nc, (Mat.recycleGrow*nnz).toInt) } m.nnz0 = nnz m } else { if (norows) { SparseMat.noRows(nr, nc, nnz) } else { SparseMat(nr, nc, nnz) } } } else { val omat = oldmat.asInstanceOf[SparseMat[T]]; if (omat.nrows == nr && omat.ncols == nc && nnz <= omat.data.length) { omat.nnz0 = nnz omat } else { omat.recycle(nr, nc, nnz) } } } def newOrCheck[T](nr:Int, nc:Int, nnz0:Int, outmat:Mat, norows:Boolean, matGuid:Long, opHash:Int) (implicit manifest:Manifest[T], numeric:Numeric[T]):SparseMat[T] = { if (outmat.asInstanceOf[AnyRef] != null || !Mat.useCache) { newOrCheck(nr, nc, nnz0, outmat, norows) } else { val key = (matGuid, opHash) val res = Mat.cache2(key) val omat = newOrCheck(nr, nc, nnz0, res, norows) if (res != omat) Mat.cache2put(key, omat) omat } } def newOrCheck[T](nr:Int, nc:Int, nnz0:Int, outmat:Mat, norows:Boolean, guid1:Long, guid2:Long, opHash:Int) (implicit manifest:Manifest[T], numeric:Numeric[T]):SparseMat[T] = { if (outmat.asInstanceOf[AnyRef] != null || !Mat.useCache) { newOrCheck(nr, nc, nnz0, outmat, norows) } else { val key = (guid1, guid2, opHash) val res = Mat.cache3(key) val omat = newOrCheck(nr, nc, nnz0, res, norows) if (res != omat) Mat.cache3put(key, omat) omat } } def newOrCheck[T](nr:Int, nc:Int, nnz0:Int, outmat:Mat, norows:Boolean, guid1:Long, guid2:Long, guid3:Long, opHash:Int) (implicit manifest:Manifest[T], numeric:Numeric[T]):SparseMat[T] = { if (outmat.asInstanceOf[AnyRef] != null || !Mat.useCache) { newOrCheck(nr, nc, nnz0, outmat, norows) } else { val key = (guid1, guid2, guid3, opHash) val res = Mat.cache4(key) val omat = newOrCheck(nr, nc, nnz0, res, norows) if (res != omat) Mat.cache4put(key, omat) omat } } }
phlip9/BIDMat
src/main/scala/BIDMat/SparseMat.scala
Scala
bsd-3-clause
28,499
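SparseMat above stores matrices in CSC form: jc holds column start offsets, ir the row index of each stored value, data the values, all shifted by the optional one-based offset. A minimal zero-based sketch of the lookup that get_ performs, using a linear scan where the real code binary-searches:

object CscDemo extends App {
  // 3x3 matrix with nonzeros (0,0)=1.0, (2,0)=2.0, (1,2)=3.0, stored column-major.
  val jc   = Array(0, 2, 2, 3)   // column c occupies indices [jc(c), jc(c+1))
  val ir   = Array(0, 2, 1)      // row index of each stored value
  val data = Array(1.0, 2.0, 3.0)

  def get(r: Int, c: Int): Double = {
    var i = jc(c)
    while (i < jc(c + 1) && ir(i) < r) i += 1   // row indices are sorted within a column
    if (i < jc(c + 1) && ir(i) == r) data(i) else 0.0
  }

  println(get(2, 0)) // 2.0
  println(get(1, 1)) // 0.0 (column 1 is empty)
}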
package org.jmespike.conf import java.awt.{BorderLayout, Dimension} import java.awt.Window._ import javax.swing._ import java.awt.event.{ActionEvent, ActionListener} import org.scalaprops.{Property, Bean, BeanListener} import org.scalaprops.exporter.JsonBeanExporter import java.io._ import net.miginfocom.swing.MigLayout import org.scalaprops.ui.editors.{NestedBeanEditor} import org.scalaprops.parser.ParseError /** * */ class ConfEditor[T <: Conf](confChangeListener: (T) => Unit, defaultSavePath: File, confType: Class[T]) { private val frame = new JFrame() private var settings: T = null.asInstanceOf[T] private val mainPanel = new JPanel(new BorderLayout()) private var levelEditorUi: NestedBeanEditor[_] = null private val changeListener= new BeanListener { def onPropertyRemoved(bean: Bean, property: Property[ _ ]) {} def onPropertyAdded(bean: Bean, property: Property[ _ ]) {} def onPropertyChanged(bean: Bean, property: Property[ _ ]) { reLoadSettings() } } private var mostRecentSavePath: File = defaultSavePath def setSettings(_settings: T) { if (_settings != settings) { if (settings != null) settings.removeDeepListener(changeListener) settings = _settings if (levelEditorUi != null) mainPanel.remove(levelEditorUi) levelEditorUi = makeEditorUi if (levelEditorUi != null) mainPanel.add(levelEditorUi, BorderLayout.CENTER) mainPanel.invalidate() mainPanel.validate() mainPanel.repaint() frame.pack() if (settings != null) settings.addDeepListener(changeListener) } } def start() { levelEditorUi = makeEditorUi if (levelEditorUi!=null) mainPanel.add(levelEditorUi, BorderLayout.CENTER) val buttonPanel = new JPanel(new MigLayout()) mainPanel.add(buttonPanel, BorderLayout.NORTH) buttonPanel.add(makeReloadButton) // buttonPanel.add(makeLoadLevelButton) buttonPanel.add(makeSaveButton) buttonPanel.add(makeLoadButton) setupFrame(mainPanel) } def setActive(active: Boolean) { frame.setVisible(active) } private def setupFrame(mainPanel: JComponent) { frame.setTitle("Level Editor") frame.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE) frame.setPreferredSize(new Dimension(600, 800)) frame.setContentPane(mainPanel) frame.pack() frame.setVisible(true) } private def makeReloadButton: JComponent = { val button = new JButton("Reload Game") button.addActionListener(new ActionListener{ def actionPerformed(e: ActionEvent) { reLoadSettings() } }) button } /* private def makeLoadLevelButton: JComponent = { val button = new JButton("Load Level") button.addActionListener(new ActionListener{ def actionPerformed(e: ActionEvent) { loadLevel() } }) button } */ private def makeSaveButton: JComponent = { val button = new JButton("Save") button.addActionListener(new ActionListener{ def actionPerformed(e: ActionEvent) { save(settings) } }) button } private def makeLoadButton: JComponent = { val button = new JButton("Load") button.addActionListener(new ActionListener{ def actionPerformed(e: ActionEvent) { load() } }) button } private def reLoadSettings() { if (settings != null) confChangeListener(settings) } /* private def loadLevel() { if (settings != null) { val obj = levelEditorUi.beanSelector.getLastSelectedPathComponent if (obj != null && classOf[Level].isInstance(obj)) { Ludum20.loadLevel(obj.asInstanceOf[Level]) } } } */ private def makeEditorUi: NestedBeanEditor[_] = { if (settings != null) { val editor: NestedBeanEditor[_] = settings.createNestedEditor() editor } else null } private def save(settings: T) { val exporter = new JsonBeanExporter() val fc = new JFileChooser(mostRecentSavePath) val result = 
fc.showSaveDialog(frame) if (result == JFileChooser.APPROVE_OPTION) { val outfile: File = fc.getSelectedFile val writer = new BufferedWriter(new FileWriter(outfile, false)) exporter.export(settings, writer) writer.close() } mostRecentSavePath = fc.getCurrentDirectory /* NOTE: This is needed and only works if we are running from webstart: val inputStream = new ByteArrayInputStream(exporter.exportAsString(settings).getBytes("UTF-8")); var fss : FileSaveService = null try { fss = (ServiceManager.lookup("javax.jnlp.FileSaveService")).asInstanceOf[FileSaveService] } catch { case e: UnavailableServiceException => fss = null println("FileSaveService not available: " + e.getMessage) } if (fss != null) { try { val output: FileContents = fss.saveFileDialog(null, null, inputStream, "settings.json"); println("Save success: " + (output != null)) } catch { case e: Exception => e.printStackTrace() } } */ } private def load() { val parser: ConfParser = new ConfParser() val fc = new JFileChooser(mostRecentSavePath) val result = fc.showOpenDialog(frame) if (result == JFileChooser.APPROVE_OPTION) { val file: File = fc.getSelectedFile try { val bean: Bean = parser.parse(new FileReader(file), file.getPath) if (confType.isInstance(bean)) { val conf: T = bean.asInstanceOf[T] setSettings(conf) confChangeListener(conf) } else { JOptionPane.showMessageDialog(frame, "The selected file doesn't contain a configuration file of type " + confType.getName + ".\\nDid not load the config file.", "Wrong configuration type", JOptionPane.WARNING_MESSAGE) } } catch { case p: ParseError => JOptionPane.showMessageDialog(frame, "The selected file contains syntax errors:\\n" + p.getMessage + "\\nDid not load the config file.", "Parse error", JOptionPane.ERROR_MESSAGE) } } mostRecentSavePath = fc.getCurrentDirectory } }
zzorn/skycastle
src/main/scala/org/jmespike/conf/ConfEditor.scala
Scala
gpl-2.0
6,437
import com.trueaccord.proto.e2e.custom_types._ import com.trueaccord.proto.e2e.custom_types.CustomMessage.Weather import com.trueaccord.proto.e2e.CustomTypes.{CustomMessage => CustomMessageJava} import com.trueaccord.proto.e2e.CustomTypes.CustomMessage.{Weather => WeatherJava} import org.scalatest._ import com.trueaccord.pb._ class CustomTypesSpec extends FlatSpec with MustMatchers { "CustomMessage" should "serialize and parse" in { val message = CustomMessage( personId = Some(PersonId("abcd")), requiredPersonId = PersonId("required"), personIds = Seq(PersonId("p1"), PersonId("p2")), age = Some(Years(27)), requiredAge = Years(25), ages = Seq(Years(3), Years(8), Years(35)), name = Some(FullName(firstName = "Foo", lastName = "Bar")), requiredName = FullName(firstName = "Owen", lastName = "Money"), names = Seq( FullName(firstName = "Foo", lastName = "Bar"), FullName(firstName = "V1", lastName = "Z2")), weather = Some(WrappedWeather(Weather.RAIN)), requiredWeather = WrappedWeather(Weather.SUNNY), weathers = Seq(WrappedWeather(Weather.RAIN), WrappedWeather(Weather.SUNNY)), packedWeathers = Seq(WrappedWeather(Weather.RAIN), WrappedWeather(Weather.RAIN)) ) message.getPersonId must be(PersonId("abcd")) message.requiredPersonId must be(PersonId("required")) message.personIds must be(Seq(PersonId("p1"), PersonId("p2"))) message.getAge must be(Years(27)) message.requiredAge must be(Years(25)) message.ages must be(Seq(Years(3), Years(8), Years(35))) message.getName must be(FullName("Foo", "Bar")) CustomMessage.parseFrom(message.toByteArray) must be(message) CustomMessage.toJavaProto(message).getPersonId must be("abcd") CustomMessage.toJavaProto(message).getRequiredPersonId must be("required") CustomMessage.toJavaProto(message).getAge must be(27) CustomMessage.toJavaProto(message).getRequiredAge must be(25) CustomMessage.toJavaProto(message).getName.getFirst must be("Foo") CustomMessage.toJavaProto(message).getName.getLast must be("Bar") } "Custom message types" should "concatenate correctly" in { val m1 = { val b = CustomMessageJava.newBuilder b.getNameBuilder.setFirst("Foo") b.setRequiredPersonId("p1") b.getRequiredNameBuilder.setFirst("first_req") b.setAge(4) b.setRequiredAge(1) b.setRequiredWeather(WeatherJava.SUNNY) b.addPackedWeathers(WeatherJava.SUNNY) b.addPackedWeathers(WeatherJava.RAIN) b.build } val m2 = { val b = CustomMessageJava.newBuilder b.getNameBuilder.setLast("Bar") b.setRequiredPersonId("p2") b.getRequiredNameBuilder.setLast("last_req") b.setAge(5) b.setRequiredAge(2) b.setRequiredWeather(WeatherJava.RAIN) b.addPackedWeathers(WeatherJava.RAIN) b.addPackedWeathers(WeatherJava.SUNNY) b.build } val expected = CustomMessage( requiredPersonId = PersonId("p2"), requiredAge = Years(2), requiredName = FullName("first_req", "last_req"), requiredWeather = WrappedWeather(Weather.RAIN), packedWeathers = Seq( WrappedWeather(Weather.SUNNY), WrappedWeather(Weather.RAIN), WrappedWeather(Weather.RAIN), WrappedWeather(Weather.SUNNY) ) ) .update( _.name := FullName("Foo", "Bar"), _.age := Years(5) ) val concat = (m1.toByteArray ++ m2.toByteArray) CustomMessage.parseFrom(concat) must be(expected) } "Extended types" should "inherit from marker type" in { val t: DomainEvent = CustomerEvent( personId = Some(PersonId("123")), optionalNumber = Some(1), repeatedNumber = Seq(2,3,4), requiredNumber = 5) t mustBe a [DomainEvent] t.personId must be(Some(PersonId("123"))) t.optionalNumber must be(Some(1)) t.repeatedNumber must be(Seq(2,3,4)) t.requiredNumber must be(5) } "Extended companion objects" should 
"inherit from marker type" in { CustomerEvent mustBe a [DomainEventCompanion] CustomerEvent.thisIs must be("The companion object") } }
eiennohito/ScalaPB
e2e/src/test/scala/CustomTypesSpec.scala
Scala
apache-2.0
4,166
package bifrost.scorexMod /** * Created by cykoz on 4/13/17. */ import bifrost.transaction.Transaction import bifrost.transaction.box.proposition.Proposition import bifrost.transaction.state.MinimalState.VersionTag import bifrost.{NodeViewComponent, NodeViewModifier, PersistentNodeViewModifier} import scala.util.Try /** * Abstract functional interface of state which is a result of a sequential blocks applying */ trait GenericMinimalState[T, P <: Proposition, BX <: GenericBox[P, T], TX <: Transaction[P], M <: PersistentNodeViewModifier[P, TX], MS <: GenericMinimalState[T, P, BX, TX, M, MS]] extends NodeViewComponent { self: MS => def version: VersionTag def validate(transaction: TX): Try[Unit] def validate(mod: M): Try[Unit] = Try(mod.transactions.getOrElse(Seq()).foreach(tx => validate(tx).get)) def isValid(tx: TX): Boolean = validate(tx).isSuccess def filterValid(txs: Seq[TX]): Seq[TX] = txs.filter(isValid) def closedBox(boxId: Array[Byte]): Option[BX] //def boxesOf(proposition: P): Seq[BX] def changes(mod: M): Try[GenericStateChanges[T, P, BX]] def applyChanges(changes: GenericStateChanges[T, P, BX], newVersion: VersionTag): Try[MS] def applyModifier(mod: M): Try[MS] = { validate(mod) flatMap { r => changes(mod).flatMap(cs => applyChanges(cs, mod.id)) } } def applyModifiers(mods: Seq[M]): Try[MS] = mods.foldLeft(Try(this)) { case (curTry, mod) => curTry flatMap (_.applyModifier(mod)) } def rollbackTo(version: VersionTag): Try[MS] } object GenericMinimalState { type VersionTag = NodeViewModifier.ModifierId }
Topl/Project-Bifrost
src/main/scala/bifrost/scorexMod/GenericMinimalState.scala
Scala
mpl-2.0
1,619
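The `applyModifiers` method in the `GenericMinimalState` trait above chains state transitions by folding `applyModifier` over the modifiers with `Try.flatMap`, so the first failure aborts the rest. A minimal, self-contained sketch of that fold pattern; `TryFoldSketch`, `SketchState`, and `applyBlock` are illustrative names, not part of the Bifrost API:

import scala.util.Try

object TryFoldSketch extends App {
  final case class SketchState(height: Int) {
    // Stand-in for applyModifier: succeeds only for non-empty "blocks".
    def applyBlock(block: String): Try[SketchState] =
      Try { require(block.nonEmpty, "empty block"); SketchState(height + 1) }
  }

  val blocks = Seq("b1", "b2", "b3")

  // Same shape as applyModifiers: start from Try(initial) and flatMap each step,
  // so a single failing block short-circuits the whole chain.
  val result: Try[SketchState] = blocks.foldLeft(Try(SketchState(0))) {
    case (curTry, block) => curTry.flatMap(_.applyBlock(block))
  }

  println(result) // Success(SketchState(3))
}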
package org.elastic.rest.scala.driver.utils

import org.elastic.rest.scala.driver.RestBase.RestRequestException
import org.elastic.rest.scala.driver.RestBaseImplicits._
import org.elastic.rest.scala.driver.RestBaseRuntimeTyped.{RuntimeStringToTypedHelper, RuntimeTypedToStringHelper}

import scala.reflect.runtime._
import scala.reflect.runtime.universe._
import scala.util.Try

/**
  * Supports typed objects for custom classes only, i.e. if no JSON library is used
  * for some reason
  */
object NoJsonRuntimeHelpers {

  /**
    * Include this to support a typed API made entirely out of custom classes
    */
  implicit val NoJsonTypedToStringHelper = new RuntimeTypedToStringHelper() {
    def fromTyped[T](t: T)(implicit ct: WeakTypeTag[T]): String = t match {
      case custom: CustomTypedToString => custom.fromTyped
      case _ => throw RestRequestException(s"Type ${t.getClass} not supported with JSON lib")
    }
  }

  /**
    * Include this to support a typed API made entirely out of custom classes
    */
  implicit val NoJsontringToTypedHelper = new RuntimeStringToTypedHelper() {
    override def toType[T](s: String)(implicit ct: WeakTypeTag[T]): T = NoJsonRuntimeHelpers.createCustomTyped(s)
  }

  /** Handles getting at a class within an object...
    * ...where the class is embedded in that object via a trait
    *
    * @param ct The type tag of the end class (i.e. inside an object)
    * @tparam T The type of the object being retrieved
    * @return A module mirror containing the end class
    */
  def getOuterInstanceMirror[T](ct: universe.WeakTypeTag[T]): scala.reflect.runtime.universe.InstanceMirror = {
    // From: http://stackoverflow.com/questions/18056107/reflection-getting-module-mirror-from-inner-class-mixed-into-a-singleton-object
    // (doesn't give you everything though because can't trivially get access to the module instance, see next SO post!)
    val TypeRef(pre, _, _) = ct.tpe

    // From: http://stackoverflow.com/questions/17012294/recovering-a-singleton-instance-via-reflection-from-sealed-super-trait-when-typ
    // Getting closer
    val classSymbol = pre.typeSymbol.asClass
    val compSymbol = classSymbol.companionSymbol // (note using companion here fails)
    val moduleSymbol = compSymbol.asModule
    val moduleMirror = currentMirror.reflectModule(moduleSymbol)
    // Now we can get an instance of the outer type
    currentMirror.reflect(moduleMirror.instance)
  }

  /** Given a class with a single constructor taking a string,
    * creates an instance of the class
    *
    * TODO: needs to handle trait version, see `CirceTypeModule` example
    *
    * @param s The input to the ctor
    * @param ct The weak type tag of the custom typed output object
    * @tparam T The type of the custom typed output object
    * @return An instance of the type
    */
  def createCustomTyped[T](s: String)(implicit ct: universe.WeakTypeTag[T]): T = {
    val ctor = ct.tpe.members
      .filter(m => m.isMethod && m.asMethod.isPrimaryConstructor)
      .map(_.asMethod)
      .head

    val ctorMirror = Try {
      currentMirror.reflectClass(ct.tpe.typeSymbol.asClass)
    }
      .getOrElse {
        val moduleMirror = getOuterInstanceMirror(ct)
        val instanceMirror = currentMirror.reflect(moduleMirror.instance)
        instanceMirror.reflectClass(ct.tpe.typeSymbol.asClass)
      }
      .reflectConstructor(ctor)(s)

    ctorMirror.asInstanceOf[T]
  }
}
Alex-At-Home/rest_client_library
rest_scala_core/jvm/src/main/scala/org/elastic/rest/scala/driver/utils/NoJsonRuntimeHelpers.scala
Scala
apache-2.0
3,537
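A hedged usage sketch for `createCustomTyped` from `NoJsonRuntimeHelpers` above: a top-level class whose primary constructor takes a single String can be instantiated reflectively from the raw response body. `RawDoc` and `CreateCustomTypedSketch` are hypothetical example names, not part of the driver:

import org.elastic.rest.scala.driver.utils.NoJsonRuntimeHelpers

// Hypothetical custom type: its primary constructor takes the raw string, as required.
class RawDoc(val body: String) {
  override def toString: String = s"RawDoc($body)"
}

object CreateCustomTypedSketch extends App {
  // The compiler materialises the implicit WeakTypeTag[RawDoc] automatically
  // (scala-reflect must be on the classpath).
  val doc = NoJsonRuntimeHelpers.createCustomTyped[RawDoc]("""{"id": 1}""")
  println(doc) // RawDoc({"id": 1})
}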
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.scala.dsl import org.apache.camel.model.EnrichDefinition import org.apache.camel.processor.aggregate.AggregationStrategy import org.apache.camel.scala.dsl.builder.RouteBuilder /** * Scala enrichment for Camel's EnrichDefinition */ case class SEnrichDefinition(override val target: EnrichDefinition)(implicit val builder: RouteBuilder) extends SAbstractDefinition[EnrichDefinition] { def aggregationStrategy(strategy: AggregationStrategy) = wrap(target.setAggregationStrategy(strategy)) def aggregationStrategyRef(ref: String) = wrap(target.setAggregationStrategyRef(ref)) def resourceRef(ref: String) = wrap(target.setResourceRef(ref)) def resourceUri(resourceUri: String) = wrap(target.setResourceUri(resourceUri)) override def wrap(block: => Unit) = super.wrap(block).asInstanceOf[SEnrichDefinition] }
shuliangtao/apache-camel-2.13.0-src
components/camel-scala/src/main/scala/org/apache/camel/scala/dsl/SEnrichDefinition.scala
Scala
apache-2.0
1,657
package streams

import akka.actor.ActorSystem
import akka.stream.scaladsl._
import com.typesafe.config.ConfigFactory
import java.awt.{BorderLayout, EventQueue}
import javax.swing.{BorderFactory, JFrame, WindowConstants}
import javax.swing.UIManager._
import org.jfree.chart.ChartPanel
import org.jfree.data.time.{TimeSeries, TimeSeriesDataItem}
import org.jfree.data.time.Millisecond
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.Random

object StreamingChartApp {
  private def addOrUpdate(timeSeries: TimeSeries): Unit = {
    timeSeries.addOrUpdate(
      new TimeSeriesDataItem(
        new Millisecond(),
        Random.nextDouble()
      )
    )
    ()
  }

  private def addOrUpdateAsRunnable(timeSeries: TimeSeries): Runnable = new Runnable() {
    override def run(): Unit = {
      timeSeries.addOrUpdate(
        new TimeSeriesDataItem(
          new Millisecond(),
          Random.nextDouble()
        )
      )
      ()
    }
  }

  def main(args: Array[String]): Unit = {
    implicit val system = ActorSystem.create("streaming-chart-app", ConfigFactory.load("app.conf"))
    implicit val dispatcher = system.dispatcher
    val timeSeries = new TimeSeries("Time")

    // 1. Update time series with akka stream.
    Source.tick(1 second, 1 second, ()).map(
      _ => addOrUpdate(timeSeries)
    ).runWith(Sink.ignore)

    // 2. Update time series with akka scheduler.
    val cancellable = system.scheduler.scheduleWithFixedDelay(2 seconds, 2 seconds)(
      addOrUpdateAsRunnable(timeSeries)
    )

    // Warning: The app fails to terminate completely due to an sbt conflict.
    // Use Ctrl-C from the command line, or select the Java app and its Quit menu item.
    sys.addShutdownHook {
      cancellable.cancel()
      system.terminate()
      ()
    }

    EventQueue.invokeLater(
      new Runnable() {
        override def run(): Unit = {
          setLookAndFeel(getSystemLookAndFeelClassName)
          val chart = StreamingChart(timeSeries)
          val chartPanel = new ChartPanel(chart.jFreeChart)
          chartPanel.setBorder(BorderFactory.createEmptyBorder(15, 15, 15, 15))
          val frame = new JFrame()
          frame.setTitle("Streaming Chart App")
          frame.setSize(900, 600)
          frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE)
          frame.setLocationRelativeTo(null)
          frame.add(chartPanel, BorderLayout.CENTER)
          frame.setVisible(true)
        }
      })
  }
}
objektwerks/akka.streams
src/main/scala/streams/StreamingChartApp.scala
Scala
apache-2.0
2,370
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package system.basic import org.junit.runner.RunWith import org.scalatest.junit.JUnitRunner import common.JsHelpers import common.WskTestHelpers import common.rest.WskRest @RunWith(classOf[JUnitRunner]) class WskRestUnicodePython2Tests extends WskUnicodeTests with WskTestHelpers with JsHelpers { override val wsk: common.rest.WskRest = new WskRest override lazy val actionKind = "python:2" override lazy val actionSource = "unicode2.py" }
duynguyen/incubator-openwhisk
tests/src/test/scala/system/basic/WskRestUnicodePython2Tests.scala
Scala
apache-2.0
1,254
import sbt._ import com.typesafe.sbt.SbtScalariform import com.typesafe.sbt.SbtScalariform.ScalariformKeys object ScalariformSupport { lazy val formatSettings = SbtScalariform.scalariformSettings ++ Seq( ScalariformKeys.preferences in Compile := formattingPreferences, ScalariformKeys.preferences in Test := formattingPreferences ) import scalariform.formatter.preferences._ def formattingPreferences = FormattingPreferences() .setPreference(RewriteArrowSymbols, true) .setPreference(AlignParameters, true) .setPreference(AlignSingleLineCaseStatements, true) .setPreference(DoubleIndentClassDeclaration, true) }
jrudolph/multi-scaladoc-browser
project/ScalariformSupport.scala
Scala
mit
663
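Since `formatSettings` above is just a sequence of sbt settings, it can be splatted into any project definition. A minimal sketch, assuming the older sbt 0.13 `project/Build.scala` style that matches this plugin version; `SampleBuild`, the project id, and the base directory are placeholders:

import sbt._

object SampleBuild extends Build {
  // Applies the Scalariform preferences defined in ScalariformSupport to both
  // Compile and Test sources. ScalariformSupport lives in the same project/
  // build definition, so no import is needed for it.
  lazy val root = Project("root", file("."))
    .settings(ScalariformSupport.formatSettings: _*)
}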
package au.com.dius.pact.consumer.specs2 import java.util.concurrent.TimeUnit.MILLISECONDS import org.junit.Ignore import org.junit.runner.RunWith import org.specs2.mutable.Specification import org.specs2.runner.JUnitRunner import scala.concurrent.Await import scala.concurrent.duration.Duration //@RunWith(classOf[JUnitRunner]) @Ignore class Issue219PactSpec extends Specification with PactSpec { val consumer = "My Consumer" val provider = "My Provider" val timeout = Duration(1000, MILLISECONDS) override def is = uponReceiving("add a broker") .matching(path = "/api/broker/add", query = "options=delete.topic.enable%3Dtrue&broker=1") .willRespondWith(maybeBody = Some("{}")) .withConsumerTest((mockServer, _) => { val get = ConsumerService(mockServer.getUrl).simpleGet("/api/broker/add", "options=delete.topic.enable%3Dtrue&broker=1") Await.result(get, timeout) must be_==(200, "{}") }) }
DiUS/pact-jvm
consumer/specs2/src/test/scala/au/com/dius/pact/consumer/specs2/Issue219PactSpec.scala
Scala
apache-2.0
937
package controllers import play.api.mvc._ import play.api.libs.json.{JsObject, JsNull, Json} object Main extends Controller { def index = Action { implicit request => Ok(views.html.home()) } def ip = Action { request => Ok(Json.obj("origin" -> request.remoteAddress)) } def useragent = Action { request => Ok(Json.obj("user-agent" -> request.headers.get("User-Agent"))) } def headers = Action { request => Ok(Json.obj("headers" -> request.headers.toSimpleMap)) } def get = Action { request => val headers = Json.obj("headers" -> request.headers.toSimpleMap) val get = Json.obj("args" -> request.queryString.map(x => (x._1, x._2.mkString("")))) Ok(headers ++ get) } def post = Action { request => val headers = Json.obj("headers" -> request.headers.toSimpleMap) val get = Json.obj("args" -> request.queryString.map(x => (x._1, x._2.mkString("")))) val json = request.body.asJson.map(json => Json.obj("json" -> json)).getOrElse(Json.obj("json" -> JsNull)) val form = request.body.asFormUrlEncoded.map { form => Json.obj("form" -> form.map(x => (x._1, x._2.mkString("")))) }.getOrElse(Json.obj("form" -> Json.obj())) val formData = request.body.asMultipartFormData.map { form => val data = form.dataParts.map(x => (x._1, x._2.mkString(""))) val files = form.files.map(file => Json.obj( "filename" -> file.filename, "content-type" -> file.contentType)) Json.obj("data" -> data, "files" -> files) }.getOrElse(Json.obj("data" -> Json.obj(), "files" -> Json.obj())) Ok(json ++ form ++ headers ++ get ++ formData) } def status(status: Int) = Action { Status(status) } def redirect(count: Int) = Action { if (count > 1) Redirect(routes.Main.redirect(count - 1)) else Redirect(routes.Main.get()) } def redirectTo = Action { request => request.queryString.get("url").map(x => Redirect(x.mkString(""))).getOrElse(Redirect(routes.Main.get())) } def cookies = Action { request => Ok(Json.obj("cookies" -> request.cookies.foldLeft(Json.obj())((result, current) => result ++ Json.obj(current.name -> current.value)))) } def setCookies = Action { request => val newCookies = request.queryString.map(x => Cookie(x._1, x._2.mkString(""))) Redirect(routes.Main.cookies()).withCookies(newCookies.toSeq:_*) } def deleteCookies = Action { request => val removeCookies = request.queryString.map(x => DiscardingCookie(x._1)) Redirect(routes.Main.cookies()).discardingCookies(removeCookies.toSeq:_*) } def stream(count: Int) = Action { request => val total = if (count > 1000) 1000 else count Ok((1 to total).map(x => Json.stringify(Json.obj("origin" -> request.remoteAddress))).mkString("\\n")) } }
Rovak/play-httpbin
app/controllers/Main.scala
Scala
mit
2,806
package no.nextgentel.oss.akkatools.example.booking import java.util.UUID import java.util.concurrent.TimeUnit import akka.actor.Status.Failure import akka.actor.{ActorPath, ActorSystem, Props} import no.nextgentel.oss.akkatools.aggregate._ import scala.collection.mutable.ListBuffer import scala.concurrent.duration.FiniteDuration case class PrintTicketMessage(id:String) case class CinemaNotification(seatsBooked:List[String]) trait SeatIdGenerator { def generateNextSeatId():String } class DefaultSeatIdGenerator extends SeatIdGenerator { override def generateNextSeatId(): String = UUID.randomUUID().toString } // Aggregate class BookingAggregate(ourDispatcherActor: ActorPath, ticketPrintShop: ActorPath, cinemaNotifier: ActorPath, seatIdGenerator: SeatIdGenerator) extends GeneralAggregate[BookingEvent, BookingState](FiniteDuration(60, TimeUnit.SECONDS), ourDispatcherActor) { var state = BookingState.empty() // This is our initial state(Machine) // transform command to event override def cmdToEvent = { case c: OpenBookingCmd => ResultingEvent(BookingOpenEvent(c.seats)) case c: CloseBookingCmd => ResultingEvent(BookingClosedEvent()) case c: ReserveSeatCmd => // Generate a random seatId val seatId = seatIdGenerator.generateNextSeatId() val event = ReservationEvent(seatId) ResultingEvent(event) .withSuccessHandler( () => sender ! seatId ) // Send the seatId back .withErrorHandler ( errorMsg => sender ! Failure(new Exception(errorMsg)) ) case c: CancelSeatCmd => ResultingEvent(CancelationEvent(c.seatId)) .withSuccessHandler( () => sender ! "ok") .withErrorHandler( (errorMsg) => sender ! Failure(new Exception(errorMsg)) ) } override def generateResultingDurableMessages = { case e: BookingClosedEvent => // The booking has now been closed and we need to send an important notification to the Cinema val cinemaNotification = CinemaNotification(state.reservations.toList) ResultingDurableMessages(cinemaNotification, cinemaNotifier) case e: ReservationEvent => // The seat-reservation has been confirmed and we need to print the ticket val printShopMessage = PrintTicketMessage(e.id) ResultingDurableMessages(printShopMessage, ticketPrintShop) } } object BookingAggregate { def props(ourDispatcherActor: ActorPath, ticketPrintShop: ActorPath, cinemaNotifier: ActorPath, seatIdGenerator: SeatIdGenerator = new DefaultSeatIdGenerator()) = Props(new BookingAggregate(ourDispatcherActor, ticketPrintShop, cinemaNotifier, seatIdGenerator)) } // Setting up the builder we're going to use for our BookingAggregate and view class BookingAggregateBuilder(actorSystem: ActorSystem) extends GeneralAggregateBuilder[BookingEvent, BookingState](actorSystem, "booking", Some(BookingState.empty())) { def config(ticketPrintShop: ActorPath, cinemaNotifier: ActorPath): Unit = { withGeneralAggregateProps { ourDispatcher: ActorPath => BookingAggregate.props(ourDispatcher, ticketPrintShop, cinemaNotifier) } } }
zapodot/akka-tools
examples/aggregates/src/main/scala/no/nextgentel/oss/akkatools/example/booking/Booking.scala
Scala
mit
3,122
/* * Copyright 2015 Heiko Seeberger * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package de.heikoseeberger package object akkasse { /** * View from `A` to [[ServerSentEvent]]. */ @deprecated("See deprecation of `EventPublisher`", "1.1.0") type ToServerSentEvent[A] = A => ServerSentEvent }
jasonchaffee/akka-sse
akka-sse/src/main/scala/de/heikoseeberger/akkasse/package.scala
Scala
apache-2.0
823
package com.outr.arango import com.outr.arango.api.{APIIndex, APIIndexIndexHandle, APIIndexfulltext, APIIndexgeo, APIIndexpersistent, APIIndexttl} import com.outr.arango.api.model.{PostAPIIndexFulltext, PostAPIIndexGeo, PostAPIIndexPersistent, PostAPIIndexTtl} import io.youi.client.HttpClient import profig.JsonUtil import scala.concurrent.{ExecutionContext, Future} class ArangoIndex(client: HttpClient, dbName: String, collectionName: String) { def create(index: Index)(implicit ec: ExecutionContext): Future[IndexInfo] = { val future = index.`type` match { case IndexType.Persistent => APIIndexpersistent.post(client, collectionName, PostAPIIndexPersistent("persistent", Some(index.fields), Some(index.sparse), Some(index.unique))) case IndexType.Geo => APIIndexgeo.post(client, collectionName, PostAPIIndexGeo("geo", Some(index.fields), Some(index.geoJson.toString))) case IndexType.FullText => APIIndexfulltext.post(client, collectionName, PostAPIIndexFulltext("fulltext", Some(index.fields), Some(index.minLength))) case IndexType.TTL => APIIndexttl.post(client, collectionName, PostAPIIndexTtl("ttl", Some(index.expireAfterSeconds.toDouble), Some(index.fields))) } future.map(json => JsonUtil.fromJson[IndexInfo](json)) } def list()(implicit ec: ExecutionContext): Future[IndexList] = { APIIndex.get(client, collectionName).map(json => JsonUtil.fromJson[IndexList](json)) } def delete(id: Id[Index])(implicit ec: ExecutionContext): Future[IndexDelete] = { APIIndexIndexHandle.delete(client, collectionName, id.value).map(json => JsonUtil.fromJson[IndexDelete](json)) } }
outr/arangodb-scala
driver/src/main/scala/com/outr/arango/ArangoIndex.scala
Scala
mit
1,639
package lila.shutup case class UserRecord( _id: String, /* pub: Option[List[PublicLine]], intentionally not mapped to DB */ puf: Option[List[Double]], tef: Option[List[Double]], prm: Option[List[Double]], prc: Option[List[Double]], puc: Option[List[Double]] ) { def userId = _id def reports: List[TextReport] = List( TextReport(TextType.PublicForumMessage, ~puf), TextReport(TextType.TeamForumMessage, ~tef), TextReport(TextType.PrivateMessage, ~prm), TextReport(TextType.PrivateChat, ~prc), TextReport(TextType.PublicChat, ~puc) ) } case class TextAnalysis( text: String, badWords: List[String] ) { lazy val nbWords = text.split("""\\W+""").length def nbBadWords = badWords.size def ratio: Double = if (nbWords == 0) 0 else nbBadWords.toDouble / nbWords def dirty = ratio > 0 } sealed abstract class TextType( val key: String, val rotation: Int, val name: String ) object TextType { case object PublicForumMessage extends TextType("puf", 20, "Public forum message") case object TeamForumMessage extends TextType("tef", 20, "Team forum message") case object PrivateMessage extends TextType("prm", 20, "Private message") case object PrivateChat extends TextType("prc", 40, "Private chat") case object PublicChat extends TextType("puc", 60, "Public chat") } case class TextReport(textType: TextType, ratios: List[Double]) { def minRatios = textType.rotation / 15 def nbBad = ratios.count(_ > TextReport.unacceptableRatio) def tolerableNb = (ratios.size / 10) atLeast 3 def unacceptable = (ratios.sizeIs >= minRatios) && (nbBad > tolerableNb) } object TextReport { val unacceptableRatio = 1d / 30 }
luanlv/lila
modules/shutup/src/main/model.scala
Scala
mit
1,758
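A small worked example of the ratio logic in `TextAnalysis` above: two flagged words out of eight gives a ratio of 0.25, any non-zero ratio marks the analysis as dirty, and 0.25 is well above `TextReport.unacceptableRatio` (1/30, about 0.033), so a message like this would count towards `nbBad`. The sample text and flagged words are made up, and `TextAnalysisSketch` is an illustrative name:

import lila.shutup.TextAnalysis

object TextAnalysisSketch extends App {
  val analysis = TextAnalysis(
    text = "this is a perfectly ordinary chat message here", // 8 words after the \W+ split
    badWords = List("ordinary", "chat")                      // pretend the dictionary flagged these
  )

  println(analysis.nbWords)    // 8
  println(analysis.nbBadWords) // 2
  println(analysis.ratio)      // 0.25
  println(analysis.dirty)      // true
}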
/* * Copyright 2012-2013 Stephane Godbillon (@sgodbillon) and Zenexity * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package reactivemongo.util object LazyLogger { import org.slf4j.{ LoggerFactory, Logger } /** * Returns the lazy logger matching the SLF4J `name`. * * @param name the logger name */ def apply(name: String): LazyLogger = new LazyLogger(LoggerFactory getLogger name) final class LazyLogger private[reactivemongo] (logger: Logger) { /** Returns the corresponding SLF4J logger. */ def slf4j = logger def trace(s: => String) { if (logger.isTraceEnabled) logger.trace(s) } def trace(s: => String, e: => Throwable) { if (logger.isTraceEnabled) logger.trace(s, e) } lazy val isDebugEnabled = logger.isDebugEnabled def debug(s: => String) { if (isDebugEnabled) logger.debug(s) } def debug(s: => String, e: => Throwable) { if (isDebugEnabled) logger.debug(s, e) } def info(s: => String) { if (logger.isInfoEnabled) logger.info(s) } def info(s: => String, e: => Throwable) { if (logger.isInfoEnabled) logger.info(s, e) } def warn(s: => String) { if (logger.isWarnEnabled) logger.warn(s) } def warn(s: => String, e: => Throwable) { if (logger.isWarnEnabled) logger.warn(s, e) } def error(s: => String) { if (logger.isErrorEnabled) logger.error(s) } def error(s: => String, e: => Throwable) { if (logger.isErrorEnabled) logger.error(s, e) } } }
maxime-gautre/ReactiveMongo
driver/src/main/scala/util/LazyLogger.scala
Scala
apache-2.0
2,000
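A hedged usage sketch for ReactiveMongo's `LazyLogger` above: because the message parameters are by-name (`s: => String`), the interpolation below, including the expensive call, is only evaluated if the underlying SLF4J logger actually has that level enabled. The logger name, `expensiveDump`, and `LazyLoggerSketch` are illustrative only:

import reactivemongo.util.LazyLogger

object LazyLoggerSketch extends App {
  val logger = LazyLogger("sketch.module")

  def expensiveDump(): String = {
    Thread.sleep(100) // pretend this walks a large data structure
    "lots of internal state"
  }

  // expensiveDump() is never called unless debug logging is enabled,
  // because debug takes its message by name.
  logger.debug(s"current state: ${expensiveDump()}")
}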