code (string, 5-1M chars) | repo_name (string, 5-109 chars) | path (string, 6-208 chars) | language (1 class) | license (15 classes) | size (int64, 5-1M) |
---|---|---|---|---|---|
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.geotools
import java.util.regex.Pattern
import com.typesafe.config.ConfigFactory
import org.geotools.feature.simple.SimpleFeatureTypeBuilder
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.geotools.RichAttributeDescriptors._
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes.AttributeOptions._
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes.Configs._
import org.locationtech.geomesa.utils.stats.{Cardinality, IndexCoverage}
import org.locationtech.geomesa.utils.text.KVPairParser
import org.opengis.feature.simple.SimpleFeatureType
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
import scala.util.Try
@RunWith(classOf[JUnitRunner])
class SimpleFeatureTypesTest extends Specification {
sequential
args(color = true)
"SimpleFeatureTypes" should {
"create an sft that" >> {
val sft = SimpleFeatureTypes.createType("testing", "id:Integer,dtg:Date,*geom:Point:srid=4326:index=true")
"has name \\'test\\'" >> { sft.getTypeName mustEqual "testing" }
"has three attributes" >> { sft.getAttributeCount must be_==(3) }
"has an id attribute which is " >> {
val idDescriptor = sft.getDescriptor("id")
"not null" >> { (idDescriptor must not).beNull }
"not indexed" >> { idDescriptor.getUserData.get("index") must beNull }
}
"has a default geom field called 'geom'" >> {
val geomDescriptor = sft.getGeometryDescriptor
geomDescriptor.getLocalName must be equalTo "geom"
}
"not include index flag for geometry" >> {
val geomDescriptor = sft.getGeometryDescriptor
geomDescriptor.getUserData.get("index") must beNull
}
"encode an sft properly" >> {
SimpleFeatureTypes.encodeType(sft) must be equalTo s"id:Integer,dtg:Date,*geom:Point:srid=4326"
}
"encode an sft properly without user data" >> {
sft.getUserData.put("geomesa.table.sharing", "true")
sft.getUserData.put("hello", "goodbye")
SimpleFeatureTypes.encodeType(sft) must be equalTo s"id:Integer,dtg:Date,*geom:Point:srid=4326"
}
"encode an sft properly with geomesa user data" >> {
val encoded = SimpleFeatureTypes.encodeType(sft, includeUserData = true)
encoded must startWith("id:Integer,dtg:Date,*geom:Point:srid=4326;")
encoded must contain("geomesa.index.dtg='dtg'")
encoded must contain("geomesa.table.sharing='true'")
encoded must not(contain("hello="))
}
"encode an sft properly with specified user data" >> {
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
sft.setUserDataPrefixes(Seq("hello"))
val encoded = SimpleFeatureTypes.encodeType(sft, includeUserData = true)
encoded must startWith("id:Integer,dtg:Date,*geom:Point:srid=4326;")
encoded must contain("geomesa.user-data.prefix='hello'")
encoded must contain("geomesa.index.dtg='dtg'")
encoded must contain("geomesa.table.sharing='true'")
encoded must contain("hello='goodbye'")
}
}
"create an empty type" >> {
val sft = SimpleFeatureTypes.createType("test", "")
sft.getTypeName mustEqual "test"
sft.getAttributeDescriptors must beEmpty
}
"create an empty type with user data" >> {
val sft = SimpleFeatureTypes.createType("test", ";geomesa.table.sharing='true'")
sft.getTypeName mustEqual "test"
sft.getAttributeDescriptors must beEmpty
sft.getUserData.get("geomesa.table.sharing") mustEqual "true"
}
"handle namespaces" >> {
"simple ones" >> {
val sft = SimpleFeatureTypes.createType("ns:testing", "dtg:Date,*geom:Point:srid=4326")
sft.getName.getLocalPart mustEqual "testing"
sft.getName.getNamespaceURI mustEqual "ns"
sft.getTypeName mustEqual("testing")
}
"complex ones" >> {
val sft = SimpleFeatureTypes.createType("http://geomesa/ns:testing", "dtg:Date,*geom:Point:srid=4326")
sft.getName.getLocalPart mustEqual "testing"
sft.getName.getNamespaceURI mustEqual "http://geomesa/ns"
sft.getTypeName mustEqual("testing")
}
"invalid ones" >> {
val sft = SimpleFeatureTypes.createType("http://geomesa/ns:testing:", "dtg:Date,*geom:Point:srid=4326")
sft.getName.getLocalPart mustEqual "http://geomesa/ns:testing:"
sft.getName.getNamespaceURI must beNull
sft.getTypeName mustEqual("http://geomesa/ns:testing:")
}
}
"handle empty srid" >> {
val sft = SimpleFeatureTypes.createType("testing", "id:Integer:index=false,*geom:Point:index=true")
(sft.getGeometryDescriptor.getCoordinateReferenceSystem must not).beNull
}
"handle Int vs. Integer lexicographical ordering" >> {
val sft1 = SimpleFeatureTypes.createType("testing1", "foo:Int,*geom:Point:index=true")
val sft2 = SimpleFeatureTypes.createType("testing2", "foo:Integer,*geom:Point:index=true")
sft1.getAttributeCount must beEqualTo(2)
sft2.getAttributeCount must beEqualTo(2)
}
"handle no index attribute" >> {
val sft = SimpleFeatureTypes.createType("testing", "id:Integer,*geom:Point:index=true")
sft.getDescriptor("id").getIndexCoverage() mustEqual(IndexCoverage.NONE)
}
"handle no explicit geometry" >> {
val sft = SimpleFeatureTypes.createType("testing", "id:Integer,geom:Point:index=true,geom2:Geometry")
sft.getGeometryDescriptor.getLocalName must be equalTo "geom"
}
"handle a namespace" >> {
val sft = SimpleFeatureTypes.createType("foo:testing", "id:Integer,geom:Point:index=true,geom2:Geometry")
sft.getName.getNamespaceURI must be equalTo "foo"
}
"return the indexed attributes (not including the default geometry)" >> {
val sft = SimpleFeatureTypes.createType("testing", "id:Integer:index=false,dtg:Date:index=true,*geom:Point:srid=4326:index=true")
val indexed = SimpleFeatureTypes.getSecondaryIndexedAttributes(sft)
indexed.map(_.getLocalName) must containTheSameElementsAs(List("dtg"))
}
"handle list types" >> {
"with no values specified" >> {
val sft = SimpleFeatureTypes.createType("testing", "id:Integer,names:List,dtg:Date,*geom:Point:srid=4326")
sft.getAttributeCount mustEqual(4)
sft.getDescriptor("names") must not beNull
sft.getDescriptor("names").getType.getBinding mustEqual(classOf[java.util.List[_]])
val spec = SimpleFeatureTypes.encodeType(sft)
spec mustEqual s"id:Integer,names:List[String],dtg:Date,*geom:Point:srid=4326"
}
"with defined values" >> {
val sft = SimpleFeatureTypes.createType("testing", "id:Integer,names:List[Double],dtg:Date,*geom:Point:srid=4326")
sft.getAttributeCount mustEqual(4)
sft.getDescriptor("names") must not beNull
sft.getDescriptor("names").getType.getBinding mustEqual(classOf[java.util.List[_]])
val spec = SimpleFeatureTypes.encodeType(sft)
spec mustEqual s"id:Integer,names:List[Double],dtg:Date,*geom:Point:srid=4326"
}
"fail for illegal value format" >> {
val spec = "id:Integer,names:List[Double][Double],dtg:Date,*geom:Point:srid=4326"
SimpleFeatureTypes.createType("testing", spec) should throwAn[IllegalArgumentException]
}
"fail for illegal value classes" >> {
val spec = "id:Integer,names:List[FAKE],dtg:Date,*geom:Point:srid=4326"
SimpleFeatureTypes.createType("testing", spec) should throwAn[IllegalArgumentException]
}
}
"handle map types" >> {
"with no values specified" >> {
val sft = SimpleFeatureTypes.createType("testing", "id:Integer,metadata:Map,dtg:Date,*geom:Point:srid=4326")
sft.getAttributeCount mustEqual(4)
sft.getDescriptor("metadata") must not beNull
sft.getDescriptor("metadata").getType.getBinding mustEqual classOf[java.util.Map[_, _]]
val spec = SimpleFeatureTypes.encodeType(sft)
spec mustEqual s"id:Integer,metadata:Map[String,String],dtg:Date,*geom:Point:srid=4326"
}
"with defined values" >> {
val sft = SimpleFeatureTypes.createType("testing", "id:Integer,metadata:Map[Double,String],dtg:Date,*geom:Point:srid=4326")
sft.getAttributeCount mustEqual(4)
sft.getDescriptor("metadata") must not beNull
sft.getDescriptor("metadata").getType.getBinding mustEqual classOf[java.util.Map[_, _]]
val spec = SimpleFeatureTypes.encodeType(sft)
spec mustEqual s"id:Integer,metadata:Map[Double,String],dtg:Date,*geom:Point:srid=4326"
}
"with a byte array as a value" >> {
val sft = SimpleFeatureTypes.createType("testing", "byteMap:Map[String,Bytes]")
sft.getAttributeCount mustEqual(1)
sft.getDescriptor("byteMap") must not beNull
sft.getDescriptor("byteMap").getType.getBinding mustEqual classOf[java.util.Map[_, _]]
val spec = SimpleFeatureTypes.encodeType(sft)
spec mustEqual s"byteMap:Map[String,Bytes]"
}
"fail for illegal value format" >> {
val spec = "id:Integer,metadata:Map[String],dtg:Date,*geom:Point:srid=4326"
SimpleFeatureTypes.createType("testing", spec) should throwAn[IllegalArgumentException]
}
"fail for illegal value classes" >> {
val spec = "id:Integer,metadata:Map[String,FAKE],dtg:Date,*geom:Point:srid=4326"
SimpleFeatureTypes.createType("testing", spec) should throwAn[IllegalArgumentException]
}
}
"handle splitter and splitter options" >> {
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
val spec = "name:String,dtg:Date,*geom:Point:srid=4326;table.splitter.class=org.locationtech.geomesa.core.data.DigitSplitter,table.splitter.options='fmt:%02d,min:0,max:99'"
val sft = SimpleFeatureTypes.createType("test", spec)
sft.getUserData.get(TABLE_SPLITTER) must be equalTo "org.locationtech.geomesa.core.data.DigitSplitter"
val opts = KVPairParser.parse(sft.getTableSplitterOptions)
opts must haveSize(3)
opts.get("fmt") must beSome("%02d")
opts.get("min") must beSome("0")
opts.get("max") must beSome("99")
}
"handle enabled indexes" >> {
val spec = "name:String,dtg:Date,*geom:Point:srid=4326;geomesa.indices.enabled='st_idx,records,z3'"
val sft = SimpleFeatureTypes.createType("test", spec)
sft.getUserData.get(ENABLED_INDICES).toString.split(",").toList must be equalTo List("st_idx", "records", "z3")
}
"handle splitter opts and enabled indexes" >> {
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
val specs = List(
"name:String,dtg:Date,*geom:Point:srid=4326;table.splitter.class=org.locationtech.geomesa.core.data.DigitSplitter,table.splitter.options='fmt:%02d,min:0,max:99',geomesa.indices.enabled='st_idx,records,z3'",
"name:String,dtg:Date,*geom:Point:srid=4326;geomesa.indices.enabled='st_idx,records,z3',table.splitter.class=org.locationtech.geomesa.core.data.DigitSplitter,table.splitter.options='fmt:%02d,min:0,max:99'")
specs.forall { spec =>
val sft = SimpleFeatureTypes.createType("test", spec)
sft.getUserData.get(TABLE_SPLITTER) must be equalTo "org.locationtech.geomesa.core.data.DigitSplitter"
val opts = KVPairParser.parse(sft.getTableSplitterOptions)
opts must haveSize(3)
opts.get("fmt") must beSome("%02d")
opts.get("min") must beSome("0")
opts.get("max") must beSome("99")
sft.getUserData.get(ENABLED_INDICES).toString.split(",").toList must be equalTo List("st_idx", "records", "z3")
}
}
"allow arbitrary feature options in user data" >> {
val spec = "name:String,dtg:Date,*geom:Point:srid=4326;a='',c=d,x=',,,',z=23562356"
val sft = SimpleFeatureTypes.createType("foobar", spec)
sft.getUserData.toList must containAllOf(Seq("a" -> "", "c" -> "d", "x" -> ",,,", "z" -> "23562356"))
}
"allow user data with a unicode character" >> {
val spec = "name:String,dtg:Date,*geom:Point:srid=4326;geomesa.table.sharing.prefix='\\\\u0001',geomesa.mixed.geometries='true',table.indexes.enabled='',geomesa.table.sharing='true',geomesa.all.user.data='true'"
val sft = SimpleFeatureTypes.createType("foobar", spec)
sft.getUserData.toList must containAllOf(Seq("geomesa.table.sharing.prefix" -> "\\u0001", "geomesa.mixed.geometries" -> "true", "geomesa.table.sharing" -> "true"))
}
"allow specification of ST index entry values" >> {
val spec = s"name:String:index=true:$OPT_INDEX_VALUE=true,dtg:Date,*geom:Point:srid=4326"
val sft = SimpleFeatureTypes.createType("test", spec)
sft.getDescriptor("name").isIndexValue() must beTrue
}
"allow specification of attribute cardinality" >> {
val spec = s"name:String:$OPT_CARDINALITY=high,dtg:Date,*geom:Point:srid=4326"
val sft = SimpleFeatureTypes.createType("test", spec)
sft.getDescriptor("name").getUserData.get(OPT_CARDINALITY) mustEqual("high")
sft.getDescriptor("name").getCardinality() mustEqual(Cardinality.HIGH)
}
"allow specification of attribute cardinality regardless of case" >> {
val spec = s"name:String:$OPT_CARDINALITY=LOW,dtg:Date,*geom:Point:srid=4326"
val sft = SimpleFeatureTypes.createType("test", spec)
sft.getDescriptor("name").getUserData.get(OPT_CARDINALITY) mustEqual("low")
sft.getDescriptor("name").getCardinality() mustEqual(Cardinality.LOW)
}.pendingUntilFixed("currently case sensitive")
"allow specification of index attribute coverages" >> {
val spec = s"name:String:$OPT_INDEX=join,dtg:Date,*geom:Point:srid=4326"
val sft = SimpleFeatureTypes.createType("test", spec)
sft.getDescriptor("name").getUserData.get(OPT_INDEX) mustEqual("join")
sft.getDescriptor("name").getIndexCoverage() mustEqual(IndexCoverage.JOIN)
}
"allow specification of index attribute coverages regardless of case" >> {
val spec = s"name:String:$OPT_INDEX=FULL,dtg:Date,*geom:Point:srid=4326"
val sft = SimpleFeatureTypes.createType("test", spec)
sft.getDescriptor("name").getUserData.get(OPT_INDEX) mustEqual("full")
sft.getDescriptor("name").getIndexCoverage() mustEqual(IndexCoverage.FULL)
}.pendingUntilFixed("currently case sensitive")
"allow specification of index attribute coverages as booleans" >> {
val spec = s"name:String:$OPT_INDEX=true,dtg:Date,*geom:Point:srid=4326"
val sft = SimpleFeatureTypes.createType("test", spec)
sft.getDescriptor("name").getUserData.get(OPT_INDEX) mustEqual("true")
sft.getDescriptor("name").getIndexCoverage() mustEqual(IndexCoverage.JOIN)
}
"encode date attribute types" >> {
val sft: SimpleFeatureType = {
val builder = new SimpleFeatureTypeBuilder()
builder.setName("test")
builder.add("date", classOf[java.util.Date])
builder.add("sqlDate", classOf[java.sql.Date])
builder.add("sqlTimestamp", classOf[java.sql.Timestamp])
builder.buildFeatureType()
}
SimpleFeatureTypes.encodeDescriptor(sft, sft.getDescriptor(0)) mustEqual "date:Date"
SimpleFeatureTypes.encodeDescriptor(sft, sft.getDescriptor(1)) mustEqual "sqlDate:Date"
SimpleFeatureTypes.encodeDescriptor(sft, sft.getDescriptor(2)) mustEqual "sqlTimestamp:Timestamp"
}
"create schemas from sql dates" >> {
SimpleFeatureTypes.createType("test", "dtg:Timestamp,*geom:Point:srid=4326")
.getDescriptor(0).getType.getBinding mustEqual classOf[java.sql.Timestamp]
}
"return meaningful error messages" >> {
Try(SimpleFeatureTypes.createType("test", null)) must
beAFailedTry.withThrowable[IllegalArgumentException](Pattern.quote("Invalid spec string: null"))
val failures = Seq(
("foo:Strong", "7. Expected attribute type binding"),
("foo:String,*bar:String", "16. Expected geometry type binding"),
("foo:String,bar:String;;", "22. Expected one of: feature type option, end of spec"),
("foo:String,bar,baz:String", "14. Expected one of: attribute name, attribute type binding, geometry type binding"),
("foo:String:bar,baz:String", "14. Expected attribute option")
)
forall(failures) { case (spec, message) =>
val pattern = Pattern.quote(s"Invalid spec string at index $message.")
val result = Try(SimpleFeatureTypes.createType("test", spec))
result must beAFailedTry.withThrowable[IllegalArgumentException](pattern)
}
}
"build from conf" >> {
def doTest(sft: SimpleFeatureType) = {
sft.getAttributeCount must be equalTo 4
sft.getGeometryDescriptor.getName.getLocalPart must be equalTo "geom"
sft.getDescriptor("testStr").getCardinality() mustEqual(Cardinality.UNKNOWN)
sft.getDescriptor("testCard").getCardinality() mustEqual(Cardinality.HIGH)
sft.getTypeName must be equalTo "testconf"
}
"with no path" >> {
val regular = ConfigFactory.parseString(
"""
|{
| type-name = "testconf"
| fields = [
| { name = "testStr", type = "string" , index = true },
| { name = "testCard", type = "string" , index = true, cardinality = high },
| { name = "testList", type = "List[String]" , index = false },
| { name = "geom", type = "Point" , srid = 4326, default = true }
| ]
|}
""".stripMargin)
val sftRegular = SimpleFeatureTypes.createType(regular)
doTest(sftRegular)
}
"with some nesting path" >>{
val someNesting = ConfigFactory.parseString(
"""
|{
| foobar = {
| type-name = "testconf"
| fields = [
| { name = "testStr", type = "string" , index = true },
| { name = "testCard", type = "string" , index = true, cardinality = high },
| { name = "testList", type = "List[String]" , index = false },
| { name = "geom", type = "Point" , srid = 4326, default = true }
| ]
| }
|}
""".stripMargin)
val someSft = SimpleFeatureTypes.createType(someNesting, path = Some("foobar"))
doTest(someSft)
}
"with multiple nested paths" >> {
val customNesting = ConfigFactory.parseString(
"""
|baz = {
| foobar = {
| type-name = "testconf"
| fields = [
| { name = "testStr", type = "string" , index = true },
| { name = "testCard", type = "string" , index = true, cardinality = high },
| { name = "testList", type = "List[String]" , index = false },
| { name = "geom", type = "Point" , srid = 4326, default = true }
| ]
| }
|}
""".stripMargin)
val sftCustom = SimpleFeatureTypes.createType(customNesting, path = Some("baz.foobar"))
doTest(sftCustom)
}
}
"build from default nested conf" >> {
val conf = ConfigFactory.parseString(
"""
|sft = {
| type-name = "testconf"
| fields = [
| { name = "testStr", type = "string" , index = true },
| { name = "testCard", type = "string" , index = true, cardinality = high },
| { name = "testList", type = "List[String]" , index = false },
| { name = "geom", type = "Point" , srid = 4326, default = true }
| ]
|}
""".stripMargin)
val sft = SimpleFeatureTypes.createType(conf)
sft.getAttributeCount must be equalTo 4
sft.getGeometryDescriptor.getName.getLocalPart must be equalTo "geom"
sft.getDescriptor("testStr").getCardinality() mustEqual(Cardinality.UNKNOWN)
sft.getDescriptor("testCard").getCardinality() mustEqual(Cardinality.HIGH)
sft.getTypeName must be equalTo "testconf"
}
"allow user data in conf" >> {
val conf = ConfigFactory.parseString(
"""
|{
| type-name = "testconf"
| fields = [
| { name = "testStr", type = "string" , index = true },
| { name = "testCard", type = "string" , index = true, cardinality = high },
| { name = "testList", type = "List[String]" , index = false },
| { name = "geom", type = "Point" , srid = 4326, default = true }
| ]
| user-data = {
| mydataone = true
| mydatatwo = "two"
| }
|}
""".stripMargin)
val sft = SimpleFeatureTypes.createType(conf)
sft.getAttributeCount must be equalTo 4
sft.getGeometryDescriptor.getName.getLocalPart must be equalTo "geom"
sft.getDescriptor("testStr").getCardinality() mustEqual(Cardinality.UNKNOWN)
sft.getDescriptor("testCard").getCardinality() mustEqual(Cardinality.HIGH)
sft.getTypeName must be equalTo "testconf"
sft.getUserData.size() mustEqual 2
sft.getUserData.get("mydataone") mustEqual "true"
sft.getUserData.get("mydatatwo") mustEqual "two"
}
"untyped lists and maps as a type" >> {
val conf = ConfigFactory.parseString(
"""
|{
| type-name = "testconf"
| fields = [
| { name = "testList", type = "List" , index = false },
| { name = "testMap", type = "Map" , index = false },
| { name = "geom", type = "Point" , srid = 4326, default = true }
| ]
|}
""".stripMargin)
val sft = SimpleFeatureTypes.createType(conf)
sft.getAttributeCount must be equalTo 3
sft.getGeometryDescriptor.getName.getLocalPart must be equalTo "geom"
sft.getAttributeDescriptors.get(0).getType.getBinding must beAssignableFrom[java.util.List[_]]
sft.getAttributeDescriptors.get(1).getType.getBinding must beAssignableFrom[java.util.Map[_,_]]
}
"bytes as a type to work" >> {
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes.AttributeConfigs._
val conf = ConfigFactory.parseString(
"""
|{
| type-name = "byteconf"
| fields = [
| { name = "blob", type = "Bytes", index = false }
| { name = "blobList", type = "List[Bytes]", index = false }
| { name = "blobMap", type = "Map[String, Bytes]", index = false }
| ]
|}
""".stripMargin)
val sft = SimpleFeatureTypes.createType(conf)
sft.getAttributeCount must be equalTo 3
sft.getAttributeDescriptors.get(0).getType.getBinding must beAssignableFrom[Array[Byte]]
sft.getAttributeDescriptors.get(1).getType.getBinding must beAssignableFrom[java.util.List[_]]
sft.getAttributeDescriptors.get(1).getUserData.get(USER_DATA_LIST_TYPE) mustEqual classOf[Array[Byte]].getName
sft.getAttributeDescriptors.get(2).getType.getBinding must beAssignableFrom[java.util.Map[_,_]]
sft.getAttributeDescriptors.get(2).getUserData.get(USER_DATA_MAP_KEY_TYPE) mustEqual classOf[String].getName
sft.getAttributeDescriptors.get(2).getUserData.get(USER_DATA_MAP_VALUE_TYPE) mustEqual classOf[Array[Byte]].getName
}
"render SFTs as config again" >> {
import scala.collection.JavaConverters._
val conf = ConfigFactory.parseString(
"""
|{
| type-name = "testconf"
| fields = [
| { name = "testStr", type = "string" , index = true }
| { name = "testCard", type = "string" , index = true, cardinality = high }
| { name = "testList", type = "List[String]" , index = false }
| { name = "testMap", type = "Map[String, String]", index = false }
| { name = "dtg", type = "Date" }
| { name = "dtg2", type = "Date" } // not default because of ordering
| { name = "geom", type = "Point" , srid = 4326, default = true }
| ]
| user-data = {
| "geomesa.one" = "true"
| geomesa.two = "two"
| }
|}
""".stripMargin)
val sft = SimpleFeatureTypes.createType(conf)
val typeConf = SimpleFeatureTypes.toConfig(sft).getConfig("geomesa.sfts.testconf")
typeConf.getString("type-name") mustEqual "testconf"
def getFieldOpts(s: String) =
typeConf.getConfigList("attributes").filter(_.getString("name") == s).get(0).entrySet().map { case e =>
e.getKey -> e.getValue.unwrapped()
}.toMap
getFieldOpts("testStr") must havePairs("name" -> "testStr", "type" -> "String", "index" -> "true")
getFieldOpts("testCard") must havePairs("name" -> "testCard", "type" -> "String",
"index" -> "true", "cardinality" -> "high")
getFieldOpts("testList") must havePairs("name" -> "testList", "type" -> "List[String]")
getFieldOpts("testMap") must havePairs("name" -> "testMap", "type" -> "Map[String,String]")
getFieldOpts("dtg") must havePairs("name" -> "dtg", "type" -> "Date", "default" -> "true")
getFieldOpts("dtg2") must havePairs("name" -> "dtg2", "type" -> "Date")
getFieldOpts("geom") must havePairs("name" -> "geom", "type" -> "Point",
"srid" -> "4326", "default" -> "true")
val userdata = typeConf.getConfig(SimpleFeatureSpecConfig.UserDataPath).root.unwrapped().asScala
userdata must havePairs("geomesa.one" -> "true", "geomesa.two" -> "two")
}
"render sfts as config with table sharing" >> {
import scala.collection.JavaConverters._
val sft = SimpleFeatureTypes.createType("geolife",
"userId:String,trackId:String,altitude:Double,dtg:Date,*geom:Point:srid=4326;" +
"geomesa.index.dtg='dtg',geomesa.table.sharing='true',geomesa.indices='z3:4:3,z2:3:3,records:2:3'," +
"geomesa.table.sharing.prefix='\\\\u0001'")
val config = SimpleFeatureTypes.toConfig(sft, includePrefix = false)
config.hasPath(SimpleFeatureSpecConfig.UserDataPath) must beTrue
val userData = config.getConfig(SimpleFeatureSpecConfig.UserDataPath)
userData.root.unwrapped().asScala mustEqual Map(
"geomesa.index.dtg" -> "dtg",
"geomesa.indices" -> "z3:4:3,z2:3:3,records:2:3",
"geomesa.table.sharing" -> "true",
"geomesa.table.sharing.prefix" -> "\\u0001"
)
}
}
}
| jahhulbert-ccri/geomesa | geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/geotools/SimpleFeatureTypesTest.scala | Scala | apache-2.0 | 27,532 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.aggregate
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenContext
import org.apache.spark.sql.execution.vectorized.{MutableColumnarRow, OnHeapColumnVector}
import org.apache.spark.sql.types._
import org.apache.spark.sql.vectorized.ColumnarBatch
/**
* This is a helper class to generate an append-only vectorized hash map that can act as a 'cache'
* for extremely fast key-value lookups while evaluating aggregates (and fall back to the
* `BytesToBytesMap` if a given key isn't found). This is 'codegened' in HashAggregate to speed
* up aggregates w/ key.
*
* It is backed by a power-of-2-sized array for index lookups and a columnar batch that stores the
* key-value pairs. The index lookups in the array rely on linear probing (with a small number of
* maximum tries) and use an inexpensive hash function which makes it really efficient for a
* majority of lookups. However, using linear probing and an inexpensive hash function also makes it
* less robust as compared to the `BytesToBytesMap` (especially for a large number of keys or even
* for certain distribution of keys) and requires us to fall back on the latter for correctness. We
* also use a secondary columnar batch that logically projects over the original columnar batch and
* is equivalent to the `BytesToBytesMap` aggregate buffer.
*
* NOTE: This vectorized hash map currently doesn't support nullable keys and falls back to the
* `BytesToBytesMap` to store them.
*/
class VectorizedHashMapGenerator(
ctx: CodegenContext,
aggregateExpressions: Seq[AggregateExpression],
generatedClassName: String,
groupingKeySchema: StructType,
bufferSchema: StructType)
extends HashMapGenerator (ctx, aggregateExpressions, generatedClassName,
groupingKeySchema, bufferSchema) {
override protected def initializeAggregateHashMap(): String = {
val generatedSchema: String =
s"new org.apache.spark.sql.types.StructType()" +
(groupingKeySchema ++ bufferSchema).map { key =>
val keyName = ctx.addReferenceObj("keyName", key.name)
key.dataType match {
case d: DecimalType =>
s""".add($keyName, org.apache.spark.sql.types.DataTypes.createDecimalType(
|${d.precision}, ${d.scale}))""".stripMargin
case _ =>
s""".add($keyName, org.apache.spark.sql.types.DataTypes.${key.dataType})"""
}
}.mkString("\\n").concat(";")
val generatedAggBufferSchema: String =
s"new org.apache.spark.sql.types.StructType()" +
bufferSchema.map { key =>
val keyName = ctx.addReferenceObj("keyName", key.name)
key.dataType match {
case d: DecimalType =>
s""".add($keyName, org.apache.spark.sql.types.DataTypes.createDecimalType(
|${d.precision}, ${d.scale}))""".stripMargin
case _ =>
s""".add($keyName, org.apache.spark.sql.types.DataTypes.${key.dataType})"""
}
}.mkString("\\n").concat(";")
s"""
| private ${classOf[OnHeapColumnVector].getName}[] vectors;
| private ${classOf[ColumnarBatch].getName} batch;
| private ${classOf[MutableColumnarRow].getName} aggBufferRow;
| private int[] buckets;
| private int capacity = 1 << 16;
| private double loadFactor = 0.5;
| private int numBuckets = (int) (capacity / loadFactor);
| private int maxSteps = 2;
| private int numRows = 0;
| private org.apache.spark.sql.types.StructType schema = $generatedSchema
| private org.apache.spark.sql.types.StructType aggregateBufferSchema =
| $generatedAggBufferSchema
|
| public $generatedClassName() {
| vectors = ${classOf[OnHeapColumnVector].getName}.allocateColumns(capacity, schema);
| batch = new ${classOf[ColumnarBatch].getName}(vectors);
|
| // Generates a projection to return the aggregate buffer only.
| ${classOf[OnHeapColumnVector].getName}[] aggBufferVectors =
| new ${classOf[OnHeapColumnVector].getName}[aggregateBufferSchema.fields().length];
| for (int i = 0; i < aggregateBufferSchema.fields().length; i++) {
| aggBufferVectors[i] = vectors[i + ${groupingKeys.length}];
| }
| aggBufferRow = new ${classOf[MutableColumnarRow].getName}(aggBufferVectors);
|
| buckets = new int[numBuckets];
| java.util.Arrays.fill(buckets, -1);
| }
""".stripMargin
}
/**
* Generates a method that returns true if the group-by keys exist at a given index in the
* associated [[org.apache.spark.sql.execution.vectorized.OnHeapColumnVector]]. For instance,
* if we have 2 long group-by keys, the generated function would be of the form:
*
* {{{
* private boolean equals(int idx, long agg_key, long agg_key1) {
* return vectors[0].getLong(buckets[idx]) == agg_key &&
* vectors[1].getLong(buckets[idx]) == agg_key1;
* }
* }}}
*/
protected def generateEquals(): String = {
def genEqualsForKeys(groupingKeys: Seq[Buffer]): String = {
groupingKeys.zipWithIndex.map { case (key: Buffer, ordinal: Int) =>
val value = ctx.getValueFromVector(s"vectors[$ordinal]", key.dataType, "buckets[idx]")
s"(${ctx.genEqual(key.dataType, value, key.name)})"
}.mkString(" && ")
}
s"""
|private boolean equals(int idx, $groupingKeySignature) {
| return ${genEqualsForKeys(groupingKeys)};
|}
""".stripMargin
}
/**
* Generates a method that returns a
* [[org.apache.spark.sql.execution.vectorized.MutableColumnarRow]] which keeps track of the
* aggregate value(s) for a given set of keys. If the corresponding row doesn't exist, the
* generated method adds the corresponding row in the associated
* [[org.apache.spark.sql.execution.vectorized.OnHeapColumnVector]]. For instance, if we
* have 2 long group-by keys, the generated function would be of the form:
*
* {{{
* public MutableColumnarRow findOrInsert(long agg_key, long agg_key1) {
* long h = hash(agg_key, agg_key1);
* int step = 0;
* int idx = (int) h & (numBuckets - 1);
* while (step < maxSteps) {
* // Return bucket index if it's either an empty slot or already contains the key
* if (buckets[idx] == -1) {
* if (numRows < capacity) {
* vectors[0].putLong(numRows, agg_key);
* vectors[1].putLong(numRows, agg_key1);
* vectors[2].putLong(numRows, 0);
* buckets[idx] = numRows++;
* aggBufferRow.rowId = numRows;
* return aggBufferRow;
* } else {
* // No more space
* return null;
* }
* } else if (equals(idx, agg_key, agg_key1)) {
* aggBufferRow.rowId = buckets[idx];
* return aggBufferRow;
* }
* idx = (idx + 1) & (numBuckets - 1);
* step++;
* }
* // Didn't find it
* return null;
* }
* }}}
*/
protected def generateFindOrInsert(): String = {
def genCodeToSetKeys(groupingKeys: Seq[Buffer]): Seq[String] = {
groupingKeys.zipWithIndex.map { case (key: Buffer, ordinal: Int) =>
ctx.setValue(s"vectors[$ordinal]", "numRows", key.dataType, key.name)
}
}
def genCodeToSetAggBuffers(bufferValues: Seq[Buffer]): Seq[String] = {
bufferValues.zipWithIndex.map { case (key: Buffer, ordinal: Int) =>
ctx.updateColumn(s"vectors[${groupingKeys.length + ordinal}]", "numRows", key.dataType,
buffVars(ordinal), nullable = true)
}
}
s"""
|public ${classOf[MutableColumnarRow].getName} findOrInsert($groupingKeySignature) {
| long h = hash(${groupingKeys.map(_.name).mkString(", ")});
| int step = 0;
| int idx = (int) h & (numBuckets - 1);
| while (step < maxSteps) {
| // Return bucket index if it's either an empty slot or already contains the key
| if (buckets[idx] == -1) {
| if (numRows < capacity) {
|
| // Initialize aggregate keys
| ${genCodeToSetKeys(groupingKeys).mkString("\n")}
|
| ${buffVars.map(_.code).mkString("\n")}
|
| // Initialize aggregate values
| ${genCodeToSetAggBuffers(bufferValues).mkString("\n")}
|
| buckets[idx] = numRows++;
| aggBufferRow.rowId = buckets[idx];
| return aggBufferRow;
| } else {
| // No more space
| return null;
| }
| } else if (equals(idx, ${groupingKeys.map(_.name).mkString(", ")})) {
| aggBufferRow.rowId = buckets[idx];
| return aggBufferRow;
| }
| idx = (idx + 1) & (numBuckets - 1);
| step++;
| }
| // Didn't find it
| return null;
|}
""".stripMargin
}
protected def generateRowIterator(): String = {
s"""
|public java.util.Iterator<${classOf[InternalRow].getName}> rowIterator() {
| batch.setNumRows(numRows);
| return batch.rowIterator();
|}
""".stripMargin
}
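/*
 * Consumer-side sketch (illustrative only, not the exact code emitted by
 * HashAggregateExec's codegen): the generated class is driven by calling
 * findOrInsert() once per input row, updating the returned buffer in place,
 * and falling back to the regular BytesToBytesMap path when it returns null.
 * The names `vectorizedMap` and `inputValue` are placeholders:
 *
 * {{{
 *   MutableColumnarRow buf = vectorizedMap.findOrInsert(agg_key, agg_key1);
 *   if (buf != null) {
 *     buf.setLong(0, buf.getLong(0) + inputValue);  // update the aggregate buffer in place
 *   } else {
 *     // map is full or the probe limit was hit: fall back to BytesToBytesMap
 *   }
 *   // after all input rows are consumed, emit the cached results:
 *   java.util.Iterator<InternalRow> it = vectorizedMap.rowIterator();
 * }}}
 */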
}
| esi-mineset/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/VectorizedHashMapGenerator.scala | Scala | apache-2.0 | 10,233 |
package zzb.db
import com.mongodb.casbah._
import Mongos._
/**
* MongoDB operations
* Created by blackangel on 2014/7/18.
*/
trait MongoAccess {
def db[A](name:String)(f: MongoDB =>A)={
f(_db(name))
}
}
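// Usage sketch for the loan pattern above: the database name "shop" and the
// collection name "orders" are hypothetical, and this assumes `Mongos._db`
// already holds a configured connection for that database; `count()` is
// Casbah's MongoCollection.count().
object MongoAccessUsageSketch extends MongoAccess {
  def orderCount() = db("shop") { mongo =>
    mongo("orders").count()
  }
}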
trait ExpressionNode{
def parent:Some[ExpressionNode]
def children:List[ExpressionNode]
}
trait LogicalBoolean extends ExpressionNode{
} | stepover/zzb | zzb-dbaccess/src/main/scala/zzb/db/MongoAccess.scala | Scala | mit | 352 |
/**
* This file is part of mycollab-web.
*
* mycollab-web is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* mycollab-web is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with mycollab-web. If not, see <http://www.gnu.org/licenses/>.
*/
package com.esofthead.mycollab.module.project.events
import com.esofthead.mycollab.eventmanager.ApplicationEvent
/**
* @author MyCollab Ltd
* @since 5.0.3
*/
object ProjectContentEvent {
class GotoDashboard(source: AnyRef) extends ApplicationEvent(source, null) {}
}
| uniteddiversity/mycollab | mycollab-web/src/main/scala/com/esofthead/mycollab/module/project/events/ProjectContentEvent.scala | Scala | agpl-3.0 | 970 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.csv
import java.io.{ByteArrayOutputStream, EOFException, File, FileOutputStream}
import java.nio.charset.{Charset, StandardCharsets, UnsupportedCharsetException}
import java.nio.file.{Files, StandardOpenOption}
import java.sql.{Date, Timestamp}
import java.text.SimpleDateFormat
import java.util.Locale
import java.util.zip.GZIPOutputStream
import scala.collection.JavaConverters._
import scala.util.Properties
import com.univocity.parsers.common.TextParsingException
import org.apache.commons.lang3.time.FastDateFormat
import org.apache.hadoop.io.SequenceFile.CompressionType
import org.apache.hadoop.io.compress.GzipCodec
import org.apache.log4j.{AppenderSkeleton, LogManager}
import org.apache.log4j.spi.LoggingEvent
import org.apache.spark.{SparkException, TestUtils}
import org.apache.spark.sql.{AnalysisException, DataFrame, QueryTest, Row}
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
class CSVSuite extends QueryTest with SharedSparkSession with TestCsvData {
import testImplicits._
private val carsFile = "test-data/cars.csv"
private val carsMalformedFile = "test-data/cars-malformed.csv"
private val carsFile8859 = "test-data/cars_iso-8859-1.csv"
private val carsTsvFile = "test-data/cars.tsv"
private val carsAltFile = "test-data/cars-alternative.csv"
private val carsMultiCharDelimitedFile = "test-data/cars-multichar-delim.csv"
private val carsMultiCharCrazyDelimitedFile = "test-data/cars-multichar-delim-crazy.csv"
private val carsUnbalancedQuotesFile = "test-data/cars-unbalanced-quotes.csv"
private val carsNullFile = "test-data/cars-null.csv"
private val carsEmptyValueFile = "test-data/cars-empty-value.csv"
private val carsBlankColName = "test-data/cars-blank-column-name.csv"
private val carsCrlf = "test-data/cars-crlf.csv"
private val emptyFile = "test-data/empty.csv"
private val commentsFile = "test-data/comments.csv"
private val disableCommentsFile = "test-data/disable_comments.csv"
private val boolFile = "test-data/bool.csv"
private val decimalFile = "test-data/decimal.csv"
private val simpleSparseFile = "test-data/simple_sparse.csv"
private val numbersFile = "test-data/numbers.csv"
private val datesFile = "test-data/dates.csv"
private val unescapedQuotesFile = "test-data/unescaped-quotes.csv"
private val valueMalformedFile = "test-data/value-malformed.csv"
private val badAfterGoodFile = "test-data/bad_after_good.csv"
private val malformedRowFile = "test-data/malformedRow.csv"
/** Verifies data and schema. */
private def verifyCars(
df: DataFrame,
withHeader: Boolean,
numCars: Int = 3,
numFields: Int = 5,
checkHeader: Boolean = true,
checkValues: Boolean = true,
checkTypes: Boolean = false): Unit = {
val numColumns = numFields
val numRows = if (withHeader) numCars else numCars + 1
// schema
assert(df.schema.fieldNames.length === numColumns)
assert(df.count === numRows)
if (checkHeader) {
if (withHeader) {
assert(df.schema.fieldNames === Array("year", "make", "model", "comment", "blank"))
} else {
assert(df.schema.fieldNames === Array("_c0", "_c1", "_c2", "_c3", "_c4"))
}
}
if (checkValues) {
val yearValues = List("2012", "1997", "2015")
val actualYears = if (!withHeader) "year" :: yearValues else yearValues
val years = if (withHeader) df.select("year").collect() else df.select("_c0").collect()
years.zipWithIndex.foreach { case (year, index) =>
if (checkTypes) {
assert(year === Row(actualYears(index).toInt))
} else {
assert(year === Row(actualYears(index)))
}
}
}
}
test("simple csv test") {
val cars = spark
.read
.format("csv")
.option("header", "false")
.load(testFile(carsFile))
verifyCars(cars, withHeader = false, checkTypes = false)
}
test("simple csv test with calling another function to load") {
val cars = spark
.read
.option("header", "false")
.csv(testFile(carsFile))
verifyCars(cars, withHeader = false, checkTypes = false)
}
test("simple csv test with type inference") {
val cars = spark
.read
.format("csv")
.option("header", "true")
.option("inferSchema", "true")
.load(testFile(carsFile))
verifyCars(cars, withHeader = true, checkTypes = true)
}
test("simple csv test with string dataset") {
val csvDataset = spark.read.text(testFile(carsFile)).as[String]
val cars = spark.read
.option("header", "true")
.option("inferSchema", "true")
.csv(csvDataset)
verifyCars(cars, withHeader = true, checkTypes = true)
val carsWithoutHeader = spark.read
.option("header", "false")
.csv(csvDataset)
verifyCars(carsWithoutHeader, withHeader = false, checkTypes = false)
}
test("test inferring booleans") {
val result = spark.read
.format("csv")
.option("header", "true")
.option("inferSchema", "true")
.load(testFile(boolFile))
val expectedSchema = StructType(List(
StructField("bool", BooleanType, nullable = true)))
assert(result.schema === expectedSchema)
}
test("test inferring decimals") {
val result = spark.read
.format("csv")
.option("comment", "~")
.option("header", "true")
.option("inferSchema", "true")
.load(testFile(decimalFile))
val expectedSchema = StructType(List(
StructField("decimal", DecimalType(20, 0), nullable = true),
StructField("long", LongType, nullable = true),
StructField("double", DoubleType, nullable = true)))
assert(result.schema === expectedSchema)
}
test("test with alternative delimiter and quote") {
val cars = spark.read
.format("csv")
.options(Map("quote" -> "\\'", "delimiter" -> "|", "header" -> "true"))
.load(testFile(carsAltFile))
verifyCars(cars, withHeader = true)
}
test("test with tab delimiter and double quote") {
val cars = spark.read
.options(Map("quote" -> "\\"", "delimiter" -> """\\t""", "header" -> "true"))
.csv(testFile(carsTsvFile))
verifyCars(cars, numFields = 6, withHeader = true, checkHeader = false)
}
test("SPARK-24540: test with multiple character delimiter (comma space)") {
val cars = spark.read
.options(Map("quote" -> "\\'", "delimiter" -> ", ", "header" -> "true"))
.csv(testFile(carsMultiCharDelimitedFile))
verifyCars(cars, withHeader = true)
}
test("SPARK-24540: test with multiple (crazy) character delimiter") {
val cars = spark.read
.options(Map("quote" -> "\\'", "delimiter" -> """_/-\\\\_""", "header" -> "true"))
.csv(testFile(carsMultiCharCrazyDelimitedFile))
verifyCars(cars, withHeader = true)
// check all the other columns, besides year (which is covered by verifyCars)
val otherCols = cars.select("make", "model", "comment", "blank").collect()
val expectedOtherColVals = Seq(
("Tesla", "S", "No comment", null),
("Ford", "E350", "Go get one now they are going fast", null),
("Chevy", "Volt", null, null)
)
expectedOtherColVals.zipWithIndex.foreach { case (values, index) =>
val actualRow = otherCols(index)
values match {
case (make, model, comment, blank) =>
assert(make == actualRow.getString(0))
assert(model == actualRow.getString(1))
assert(comment == actualRow.getString(2))
assert(blank == actualRow.getString(3))
}
}
}
test("parse unescaped quotes with maxCharsPerColumn") {
val rows = spark.read
.format("csv")
.option("maxCharsPerColumn", "4")
.load(testFile(unescapedQuotesFile))
val expectedRows = Seq(Row("\\"a\\"b", "ccc", "ddd"), Row("ab", "cc\\"c", "ddd\\""))
checkAnswer(rows, expectedRows)
}
test("bad encoding name") {
val exception = intercept[UnsupportedCharsetException] {
spark
.read
.format("csv")
.option("charset", "1-9588-osi")
.load(testFile(carsFile8859))
}
assert(exception.getMessage.contains("1-9588-osi"))
}
test("test different encoding") {
withView("carsTable") {
// scalastyle:off
spark.sql(
s"""
|CREATE TEMPORARY VIEW carsTable USING csv
|OPTIONS (path "${testFile(carsFile8859)}", header "true",
|charset "iso-8859-1", delimiter "þ")
""".stripMargin.replaceAll("\\n", " "))
// scalastyle:on
verifyCars(spark.table("carsTable"), withHeader = true)
}
}
test("crlf line separators in multiline mode") {
val cars = spark
.read
.format("csv")
.option("multiLine", "true")
.option("header", "true")
.load(testFile(carsCrlf))
verifyCars(cars, withHeader = true)
}
test("test aliases sep and encoding for delimiter and charset") {
// scalastyle:off
val cars = spark
.read
.format("csv")
.option("header", "true")
.option("encoding", "iso-8859-1")
.option("sep", "þ")
.load(testFile(carsFile8859))
// scalastyle:on
verifyCars(cars, withHeader = true)
}
test("DDL test with tab separated file") {
withView("carsTable") {
spark.sql(
s"""
|CREATE TEMPORARY VIEW carsTable USING csv
|OPTIONS (path "${testFile(carsTsvFile)}", header "true", delimiter "\\t")
""".stripMargin.replaceAll("\\n", " "))
verifyCars(spark.table("carsTable"), numFields = 6, withHeader = true, checkHeader = false)
}
}
test("DDL test parsing decimal type") {
withView("carsTable") {
spark.sql(
s"""
|CREATE TEMPORARY VIEW carsTable
|(yearMade double, makeName string, modelName string, priceTag decimal,
| comments string, grp string)
|USING csv
|OPTIONS (path "${testFile(carsTsvFile)}", header "true", delimiter "\\t")
""".stripMargin.replaceAll("\\n", " "))
assert(
spark.sql("SELECT makeName FROM carsTable where priceTag > 60000").collect().size === 1)
}
}
test("test for DROPMALFORMED parsing mode") {
withSQLConf(SQLConf.CSV_PARSER_COLUMN_PRUNING.key -> "false") {
Seq(false, true).foreach { multiLine =>
val cars = spark.read
.format("csv")
.option("multiLine", multiLine)
.options(Map("header" -> "true", "mode" -> "dropmalformed"))
.load(testFile(carsFile))
assert(cars.select("year").collect().size === 2)
}
}
}
test("test for blank column names on read and select columns") {
val cars = spark.read
.format("csv")
.options(Map("header" -> "true", "inferSchema" -> "true"))
.load(testFile(carsBlankColName))
assert(cars.select("customer").collect().size == 2)
assert(cars.select("_c0").collect().size == 2)
assert(cars.select("_c1").collect().size == 2)
}
test("test for FAILFAST parsing mode") {
Seq(false, true).foreach { multiLine =>
val exception = intercept[SparkException] {
spark.read
.format("csv")
.option("multiLine", multiLine)
.options(Map("header" -> "true", "mode" -> "failfast"))
.load(testFile(carsFile)).collect()
}
assert(exception.getMessage.contains("Malformed CSV record"))
}
}
test("test for tokens more than the fields in the schema") {
val cars = spark
.read
.format("csv")
.option("header", "false")
.option("comment", "~")
.load(testFile(carsMalformedFile))
verifyCars(cars, withHeader = false, checkTypes = false)
}
test("test with null quote character") {
val cars = spark.read
.format("csv")
.option("header", "true")
.option("quote", "")
.load(testFile(carsUnbalancedQuotesFile))
verifyCars(cars, withHeader = true, checkValues = false)
}
test("test with empty file and known schema") {
val result = spark.read
.format("csv")
.schema(StructType(List(StructField("column", StringType, false))))
.load(testFile(emptyFile))
assert(result.collect.size === 0)
assert(result.schema.fieldNames.size === 1)
}
test("DDL test with empty file") {
withView("carsTable") {
spark.sql(
s"""
|CREATE TEMPORARY VIEW carsTable
|(yearMade double, makeName string, modelName string, comments string, grp string)
|USING csv
|OPTIONS (path "${testFile(emptyFile)}", header "false")
""".stripMargin.replaceAll("\\n", " "))
assert(spark.sql("SELECT count(*) FROM carsTable").collect().head(0) === 0)
}
}
test("DDL test with schema") {
withView("carsTable") {
spark.sql(
s"""
|CREATE TEMPORARY VIEW carsTable
|(yearMade double, makeName string, modelName string, comments string, blank string)
|USING csv
|OPTIONS (path "${testFile(carsFile)}", header "true")
""".stripMargin.replaceAll("\\n", " "))
val cars = spark.table("carsTable")
verifyCars(cars, withHeader = true, checkHeader = false, checkValues = false)
assert(
cars.schema.fieldNames === Array("yearMade", "makeName", "modelName", "comments", "blank"))
}
}
test("save csv") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
val cars = spark.read
.format("csv")
.option("header", "true")
.load(testFile(carsFile))
cars.coalesce(1).write
.option("header", "true")
.csv(csvDir)
val carsCopy = spark.read
.format("csv")
.option("header", "true")
.load(csvDir)
verifyCars(carsCopy, withHeader = true)
}
}
test("save csv with quote") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
val cars = spark.read
.format("csv")
.option("header", "true")
.load(testFile(carsFile))
cars.coalesce(1).write
.format("csv")
.option("header", "true")
.option("quote", "\\"")
.save(csvDir)
val carsCopy = spark.read
.format("csv")
.option("header", "true")
.option("quote", "\\"")
.load(csvDir)
verifyCars(carsCopy, withHeader = true)
}
}
test("save csv with quoteAll enabled") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
val data = Seq(("test \\"quote\\"", 123, "it \\"works\\"!", "\\"very\\" well"))
val df = spark.createDataFrame(data)
// escapeQuotes should be true by default
df.coalesce(1).write
.format("csv")
.option("quote", "\\"")
.option("escape", "\\"")
.option("quoteAll", "true")
.save(csvDir)
val results = spark.read
.format("text")
.load(csvDir)
.collect()
val expected = "\\"test \\"\\"quote\\"\\"\\",\\"123\\",\\"it \\"\\"works\\"\\"!\\",\\"\\"\\"very\\"\\" well\\""
assert(results.toSeq.map(_.toSeq) === Seq(Seq(expected)))
}
}
test("save csv with quote escaping enabled") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
val data = Seq(("test \\"quote\\"", 123, "it \\"works\\"!", "\\"very\\" well"))
val df = spark.createDataFrame(data)
// escapeQuotes should be true by default
df.coalesce(1).write
.format("csv")
.option("quote", "\\"")
.option("escape", "\\"")
.save(csvDir)
val results = spark.read
.format("text")
.load(csvDir)
.collect()
val expected = "\\"test \\"\\"quote\\"\\"\\",123,\\"it \\"\\"works\\"\\"!\\",\\"\\"\\"very\\"\\" well\\""
assert(results.toSeq.map(_.toSeq) === Seq(Seq(expected)))
}
}
test("save csv with quote escaping disabled") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
val data = Seq(("test \\"quote\\"", 123, "it \\"works\\"!", "\\"very\\" well"))
val df = spark.createDataFrame(data)
// escapeQuotes should be true by default
df.coalesce(1).write
.format("csv")
.option("quote", "\\"")
.option("escapeQuotes", "false")
.option("escape", "\\"")
.save(csvDir)
val results = spark.read
.format("text")
.load(csvDir)
.collect()
val expected = "test \\"quote\\",123,it \\"works\\"!,\\"\\"\\"very\\"\\" well\\""
assert(results.toSeq.map(_.toSeq) === Seq(Seq(expected)))
}
}
test("save csv with quote escaping, using charToEscapeQuoteEscaping option") {
withTempPath { path =>
// original text
val df1 = Seq(
"""You are "beautiful"""",
"""Yes, \\"in the inside"\\"""
).toDF()
// text written in CSV with following options:
// quote character: "
// escape character: \\
// character to escape quote escaping: #
val df2 = Seq(
""""You are \\"beautiful\\""""",
""""Yes, #\\\\"in the inside\\"#\\""""
).toDF()
df2.coalesce(1).write.text(path.getAbsolutePath)
val df3 = spark.read
.format("csv")
.option("quote", "\\"")
.option("escape", "\\\\")
.option("charToEscapeQuoteEscaping", "#")
.load(path.getAbsolutePath)
checkAnswer(df1, df3)
}
}
test("SPARK-19018: Save csv with custom charset") {
// scalastyle:off nonascii
val content = "µß áâä ÁÂÄ"
// scalastyle:on nonascii
Seq("iso-8859-1", "utf-8", "utf-16", "utf-32", "windows-1250").foreach { encoding =>
withTempPath { path =>
val csvDir = new File(path, "csv")
Seq(content).toDF().write
.option("encoding", encoding)
.csv(csvDir.getCanonicalPath)
csvDir.listFiles().filter(_.getName.endsWith("csv")).foreach({ csvFile =>
val readback = Files.readAllBytes(csvFile.toPath)
val expected = (content + Properties.lineSeparator).getBytes(Charset.forName(encoding))
assert(readback === expected)
})
}
}
}
test("SPARK-19018: error handling for unsupported charsets") {
val exception = intercept[SparkException] {
withTempPath { path =>
val csvDir = new File(path, "csv").getCanonicalPath
Seq("a,A,c,A,b,B").toDF().write
.option("encoding", "1-9588-osi")
.csv(csvDir)
}
}
assert(exception.getCause.getMessage.contains("1-9588-osi"))
}
test("commented lines in CSV data") {
Seq("false", "true").foreach { multiLine =>
val results = spark.read
.format("csv")
.options(Map("comment" -> "~", "header" -> "false", "multiLine" -> multiLine))
.load(testFile(commentsFile))
.collect()
val expected =
Seq(Seq("1", "2", "3", "4", "5.01", "2015-08-20 15:57:00"),
Seq("6", "7", "8", "9", "0", "2015-08-21 16:58:01"),
Seq("1", "2", "3", "4", "5", "2015-08-23 18:00:42"))
assert(results.toSeq.map(_.toSeq) === expected)
}
}
test("inferring schema with commented lines in CSV data") {
val results = spark.read
.format("csv")
.options(Map("comment" -> "~", "header" -> "false", "inferSchema" -> "true"))
.option("timestampFormat", "yyyy-MM-dd HH:mm:ss")
.load(testFile(commentsFile))
.collect()
val expected =
Seq(Seq(1, 2, 3, 4, 5.01D, Timestamp.valueOf("2015-08-20 15:57:00")),
Seq(6, 7, 8, 9, 0, Timestamp.valueOf("2015-08-21 16:58:01")),
Seq(1, 2, 3, 4, 5, Timestamp.valueOf("2015-08-23 18:00:42")))
assert(results.toSeq.map(_.toSeq) === expected)
}
test("inferring timestamp types via custom date format") {
val options = Map(
"header" -> "true",
"inferSchema" -> "true",
"timestampFormat" -> "dd/MM/yyyy HH:mm")
val results = spark.read
.format("csv")
.options(options)
.load(testFile(datesFile))
.select("date")
.collect()
val dateFormat = new SimpleDateFormat("dd/MM/yyyy HH:mm", Locale.US)
val expected =
Seq(Seq(new Timestamp(dateFormat.parse("26/08/2015 18:00").getTime)),
Seq(new Timestamp(dateFormat.parse("27/10/2014 18:30").getTime)),
Seq(new Timestamp(dateFormat.parse("28/01/2016 20:00").getTime)))
assert(results.toSeq.map(_.toSeq) === expected)
}
test("load date types via custom date format") {
val customSchema = new StructType(Array(StructField("date", DateType, true)))
val options = Map(
"header" -> "true",
"inferSchema" -> "false",
"dateFormat" -> "dd/MM/yyyy HH:mm")
val results = spark.read
.format("csv")
.options(options)
.option("timeZone", "UTC")
.schema(customSchema)
.load(testFile(datesFile))
.select("date")
.collect()
val dateFormat = new SimpleDateFormat("dd/MM/yyyy hh:mm", Locale.US)
val expected = Seq(
new Date(dateFormat.parse("26/08/2015 18:00").getTime),
new Date(dateFormat.parse("27/10/2014 18:30").getTime),
new Date(dateFormat.parse("28/01/2016 20:00").getTime))
val dates = results.toSeq.map(_.toSeq.head)
expected.zip(dates).foreach {
case (expectedDate, date) =>
// As it truncates the hours, minutes and etc., we only check
// if the dates (days, months and years) are the same via `toString()`.
assert(expectedDate.toString === date.toString)
}
}
test("setting comment to null disables comment support") {
val results = spark.read
.format("csv")
.options(Map("comment" -> "", "header" -> "false"))
.load(testFile(disableCommentsFile))
.collect()
val expected =
Seq(
Seq("#1", "2", "3"),
Seq("4", "5", "6"))
assert(results.toSeq.map(_.toSeq) === expected)
}
test("nullable fields with user defined null value of \\"null\\"") {
// year,make,model,comment,blank
val dataSchema = StructType(List(
StructField("year", IntegerType, nullable = true),
StructField("make", StringType, nullable = false),
StructField("model", StringType, nullable = false),
StructField("comment", StringType, nullable = true),
StructField("blank", StringType, nullable = true)))
val cars = spark.read
.format("csv")
.schema(dataSchema)
.options(Map("header" -> "true", "nullValue" -> "null"))
.load(testFile(carsNullFile))
verifyCars(cars, withHeader = true, checkValues = false)
val results = cars.collect()
assert(results(0).toSeq === Array(2012, "Tesla", "S", null, null))
assert(results(2).toSeq === Array(null, "Chevy", "Volt", null, null))
}
test("empty fields with user defined empty values") {
// year,make,model,comment,blank
val dataSchema = StructType(List(
StructField("year", IntegerType, nullable = true),
StructField("make", StringType, nullable = false),
StructField("model", StringType, nullable = false),
StructField("comment", StringType, nullable = true),
StructField("blank", StringType, nullable = true)))
val cars = spark.read
.format("csv")
.schema(dataSchema)
.option("header", "true")
.option("emptyValue", "empty")
.load(testFile(carsEmptyValueFile))
verifyCars(cars, withHeader = true, checkValues = false)
val results = cars.collect()
assert(results(0).toSeq === Array(2012, "Tesla", "S", "empty", "empty"))
assert(results(1).toSeq ===
Array(1997, "Ford", "E350", "Go get one now they are going fast", null))
assert(results(2).toSeq === Array(2015, "Chevy", "Volt", null, "empty"))
}
test("save csv with empty fields with user defined empty values") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
// year,make,model,comment,blank
val dataSchema = StructType(List(
StructField("year", IntegerType, nullable = true),
StructField("make", StringType, nullable = false),
StructField("model", StringType, nullable = false),
StructField("comment", StringType, nullable = true),
StructField("blank", StringType, nullable = true)))
val cars = spark.read
.format("csv")
.schema(dataSchema)
.option("header", "true")
.option("nullValue", "NULL")
.load(testFile(carsEmptyValueFile))
cars.coalesce(1).write
.format("csv")
.option("header", "true")
.option("emptyValue", "empty")
.option("nullValue", null)
.save(csvDir)
val carsCopy = spark.read
.format("csv")
.schema(dataSchema)
.option("header", "true")
.load(csvDir)
verifyCars(carsCopy, withHeader = true, checkValues = false)
val results = carsCopy.collect()
assert(results(0).toSeq === Array(2012, "Tesla", "S", "empty", "empty"))
assert(results(1).toSeq ===
Array(1997, "Ford", "E350", "Go get one now they are going fast", null))
assert(results(2).toSeq === Array(2015, "Chevy", "Volt", null, "empty"))
}
}
test("save csv with compression codec option") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
val cars = spark.read
.format("csv")
.option("header", "true")
.load(testFile(carsFile))
cars.coalesce(1).write
.format("csv")
.option("header", "true")
.option("compression", "gZiP")
.save(csvDir)
val compressedFiles = new File(csvDir).listFiles()
assert(compressedFiles.exists(_.getName.endsWith(".csv.gz")))
val carsCopy = spark.read
.format("csv")
.option("header", "true")
.load(csvDir)
verifyCars(carsCopy, withHeader = true)
}
}
test("SPARK-13543 Write the output as uncompressed via option()") {
val extraOptions = Map(
"mapreduce.output.fileoutputformat.compress" -> "true",
"mapreduce.output.fileoutputformat.compress.type" -> CompressionType.BLOCK.toString,
"mapreduce.map.output.compress" -> "true",
"mapreduce.map.output.compress.codec" -> classOf[GzipCodec].getName
)
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
val cars = spark.read
.format("csv")
.option("header", "true")
.options(extraOptions)
.load(testFile(carsFile))
cars.coalesce(1).write
.format("csv")
.option("header", "true")
.option("compression", "none")
.options(extraOptions)
.save(csvDir)
val compressedFiles = new File(csvDir).listFiles()
assert(compressedFiles.exists(!_.getName.endsWith(".csv.gz")))
val carsCopy = spark.read
.format("csv")
.option("header", "true")
.options(extraOptions)
.load(csvDir)
verifyCars(carsCopy, withHeader = true)
}
}
test("Schema inference correctly identifies the datatype when data is sparse.") {
val df = spark.read
.format("csv")
.option("header", "true")
.option("inferSchema", "true")
.load(testFile(simpleSparseFile))
assert(
df.schema.fields.map(field => field.dataType).sameElements(
Array(IntegerType, IntegerType, IntegerType, IntegerType)))
}
test("old csv data source name works") {
val cars = spark
.read
.format("com.databricks.spark.csv")
.option("header", "false")
.load(testFile(carsFile))
verifyCars(cars, withHeader = false, checkTypes = false)
}
test("nulls, NaNs and Infinity values can be parsed") {
val numbers = spark
.read
.format("csv")
.schema(StructType(List(
StructField("int", IntegerType, true),
StructField("long", LongType, true),
StructField("float", FloatType, true),
StructField("double", DoubleType, true)
)))
.options(Map(
"header" -> "true",
"mode" -> "DROPMALFORMED",
"nullValue" -> "--",
"nanValue" -> "NAN",
"negativeInf" -> "-INF",
"positiveInf" -> "INF"))
.load(testFile(numbersFile))
assert(numbers.count() == 8)
}
test("SPARK-15585 turn off quotations") {
val cars = spark.read
.format("csv")
.option("header", "true")
.option("quote", "")
.load(testFile(carsUnbalancedQuotesFile))
verifyCars(cars, withHeader = true, checkValues = false)
}
test("Write timestamps correctly in ISO8601 format by default") {
withTempDir { dir =>
val iso8601timestampsPath = s"${dir.getCanonicalPath}/iso8601timestamps.csv"
val timestamps = spark.read
.format("csv")
.option("inferSchema", "true")
.option("header", "true")
.option("timestampFormat", "dd/MM/yyyy HH:mm")
.load(testFile(datesFile))
timestamps.write
.format("csv")
.option("header", "true")
.save(iso8601timestampsPath)
// This will load back the timestamps as string.
val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
val iso8601Timestamps = spark.read
.format("csv")
.schema(stringSchema)
.option("header", "true")
.load(iso8601timestampsPath)
      val iso8601 = FastDateFormat.getInstance("yyyy-MM-dd'T'HH:mm:ss.SSSXXX", Locale.US)
      val expectedTimestamps = timestamps.collect().map { r =>
        // This should be an ISO 8601 formatted string.
        Row(iso8601.format(r.toSeq.head))
}
checkAnswer(iso8601Timestamps, expectedTimestamps)
}
}
test("Write dates correctly in ISO8601 format by default") {
withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> "UTC") {
withTempDir { dir =>
val customSchema = new StructType(Array(StructField("date", DateType, true)))
val iso8601datesPath = s"${dir.getCanonicalPath}/iso8601dates.csv"
val dates = spark.read
.format("csv")
.schema(customSchema)
.option("header", "true")
.option("inferSchema", "false")
.option("dateFormat", "dd/MM/yyyy HH:mm")
.load(testFile(datesFile))
dates.write
.format("csv")
.option("header", "true")
.save(iso8601datesPath)
// This will load back the dates as string.
val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
val iso8601dates = spark.read
.format("csv")
.schema(stringSchema)
.option("header", "true")
.load(iso8601datesPath)
        val iso8601 = FastDateFormat.getInstance("yyyy-MM-dd", Locale.US)
        val expectedDates = dates.collect().map { r =>
          // This should be an ISO 8601 formatted string.
          Row(iso8601.format(r.toSeq.head))
}
checkAnswer(iso8601dates, expectedDates)
}
}
}
test("Roundtrip in reading and writing timestamps") {
withTempDir { dir =>
val iso8601timestampsPath = s"${dir.getCanonicalPath}/iso8601timestamps.csv"
val timestamps = spark.read
.format("csv")
.option("header", "true")
.option("inferSchema", "true")
.load(testFile(datesFile))
timestamps.write
.format("csv")
.option("header", "true")
.save(iso8601timestampsPath)
val iso8601timestamps = spark.read
.format("csv")
.option("header", "true")
.option("inferSchema", "true")
.load(iso8601timestampsPath)
checkAnswer(iso8601timestamps, timestamps)
}
}
test("Write dates correctly with dateFormat option") {
val customSchema = new StructType(Array(StructField("date", DateType, true)))
withTempDir { dir =>
// With dateFormat option.
val datesWithFormatPath = s"${dir.getCanonicalPath}/datesWithFormat.csv"
val datesWithFormat = spark.read
.format("csv")
.schema(customSchema)
.option("header", "true")
.option("dateFormat", "dd/MM/yyyy HH:mm")
.load(testFile(datesFile))
datesWithFormat.write
.format("csv")
.option("header", "true")
.option("dateFormat", "yyyy/MM/dd")
.save(datesWithFormatPath)
// This will load back the dates as string.
val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
val stringDatesWithFormat = spark.read
.format("csv")
.schema(stringSchema)
.option("header", "true")
.load(datesWithFormatPath)
val expectedStringDatesWithFormat = Seq(
Row("2015/08/26"),
Row("2014/10/27"),
Row("2016/01/28"))
checkAnswer(stringDatesWithFormat, expectedStringDatesWithFormat)
}
}
test("Write timestamps correctly with timestampFormat option") {
withTempDir { dir =>
      // With timestampFormat option.
val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.csv"
val timestampsWithFormat = spark.read
.format("csv")
.option("header", "true")
.option("inferSchema", "true")
.option("timestampFormat", "dd/MM/yyyy HH:mm")
.load(testFile(datesFile))
timestampsWithFormat.write
.format("csv")
.option("header", "true")
.option("timestampFormat", "yyyy/MM/dd HH:mm")
.save(timestampsWithFormatPath)
// This will load back the timestamps as string.
val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
val stringTimestampsWithFormat = spark.read
.format("csv")
.schema(stringSchema)
.option("header", "true")
.load(timestampsWithFormatPath)
val expectedStringTimestampsWithFormat = Seq(
Row("2015/08/26 18:00"),
Row("2014/10/27 18:30"),
Row("2016/01/28 20:00"))
checkAnswer(stringTimestampsWithFormat, expectedStringTimestampsWithFormat)
}
}
test("Write timestamps correctly with timestampFormat option and timeZone option") {
withTempDir { dir =>
      // With timestampFormat option and timeZone option.
val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.csv"
val timestampsWithFormat = spark.read
.format("csv")
.option("header", "true")
.option("inferSchema", "true")
.option("timestampFormat", "dd/MM/yyyy HH:mm")
.load(testFile(datesFile))
timestampsWithFormat.write
.format("csv")
.option("header", "true")
.option("timestampFormat", "yyyy/MM/dd HH:mm")
.option(DateTimeUtils.TIMEZONE_OPTION, "GMT")
.save(timestampsWithFormatPath)
// This will load back the timestamps as string.
val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
val stringTimestampsWithFormat = spark.read
.format("csv")
.schema(stringSchema)
.option("header", "true")
.load(timestampsWithFormatPath)
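      // The expected values shift by 7-8 hours because the timestamps were parsed in the
      // session-local time zone (the Spark test harness is assumed to run in
      // America/Los_Angeles) but are written back out using the GMT timeZone option.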
val expectedStringTimestampsWithFormat = Seq(
Row("2015/08/27 01:00"),
Row("2014/10/28 01:30"),
Row("2016/01/29 04:00"))
checkAnswer(stringTimestampsWithFormat, expectedStringTimestampsWithFormat)
val readBack = spark.read
.format("csv")
.option("header", "true")
.option("inferSchema", "true")
.option("timestampFormat", "yyyy/MM/dd HH:mm")
.option(DateTimeUtils.TIMEZONE_OPTION, "GMT")
.load(timestampsWithFormatPath)
checkAnswer(readBack, timestampsWithFormat)
}
}
test("load duplicated field names consistently with null or empty strings - case sensitive") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
withTempPath { path =>
Seq("a,a,c,A,b,B").toDF().write.text(path.getAbsolutePath)
val actualSchema = spark.read
.format("csv")
.option("header", true)
.load(path.getAbsolutePath)
.schema
val fields = Seq("a0", "a1", "c", "A", "b", "B").map(StructField(_, StringType, true))
val expectedSchema = StructType(fields)
assert(actualSchema == expectedSchema)
}
}
}
test("load duplicated field names consistently with null or empty strings - case insensitive") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
withTempPath { path =>
Seq("a,A,c,A,b,B").toDF().write.text(path.getAbsolutePath)
val actualSchema = spark.read
.format("csv")
.option("header", true)
.load(path.getAbsolutePath)
.schema
val fields = Seq("a0", "A1", "c", "A3", "b4", "B5").map(StructField(_, StringType, true))
val expectedSchema = StructType(fields)
assert(actualSchema == expectedSchema)
}
}
}
test("load null when the schema is larger than parsed tokens ") {
withTempPath { path =>
Seq("1").toDF().write.text(path.getAbsolutePath)
val schema = StructType(
StructField("a", IntegerType, true) ::
StructField("b", IntegerType, true) :: Nil)
val df = spark.read
.schema(schema)
.option("header", "false")
.csv(path.getAbsolutePath)
checkAnswer(df, Row(1, null))
}
}
test("SPARK-18699 put malformed records in a `columnNameOfCorruptRecord` field") {
Seq(false, true).foreach { multiLine =>
val schema = new StructType().add("a", IntegerType).add("b", DateType)
      // We use `PERMISSIVE` mode by default if an invalid mode string is given.
val df1 = spark
.read
.option("mode", "abcd")
.option("multiLine", multiLine)
.schema(schema)
.csv(testFile(valueMalformedFile))
checkAnswer(df1,
Row(0, null) ::
Row(1, java.sql.Date.valueOf("1983-08-04")) ::
Nil)
// If `schema` has `columnNameOfCorruptRecord`, it should handle corrupt records
val columnNameOfCorruptRecord = "_unparsed"
val schemaWithCorrField1 = schema.add(columnNameOfCorruptRecord, StringType)
val df2 = spark
.read
.option("mode", "Permissive")
.option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
.option("multiLine", multiLine)
.schema(schemaWithCorrField1)
.csv(testFile(valueMalformedFile))
checkAnswer(df2,
Row(0, null, "0,2013-111-11 12:13:14") ::
Row(1, java.sql.Date.valueOf("1983-08-04"), null) ::
Nil)
// We put a `columnNameOfCorruptRecord` field in the middle of a schema
val schemaWithCorrField2 = new StructType()
.add("a", IntegerType)
.add(columnNameOfCorruptRecord, StringType)
.add("b", DateType)
val df3 = spark
.read
.option("mode", "permissive")
.option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
.option("multiLine", multiLine)
.schema(schemaWithCorrField2)
.csv(testFile(valueMalformedFile))
checkAnswer(df3,
Row(0, "0,2013-111-11 12:13:14", null) ::
Row(1, null, java.sql.Date.valueOf("1983-08-04")) ::
Nil)
val errMsg = intercept[AnalysisException] {
spark
.read
.option("mode", "PERMISSIVE")
.option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
.option("multiLine", multiLine)
.schema(schema.add(columnNameOfCorruptRecord, IntegerType))
.csv(testFile(valueMalformedFile))
.collect
}.getMessage
assert(errMsg.startsWith("The field for corrupt records must be string type and nullable"))
}
}
test("Enabling/disabling ignoreCorruptFiles") {
val inputFile = File.createTempFile("input-", ".gz")
try {
// Create a corrupt gzip file
val byteOutput = new ByteArrayOutputStream()
val gzip = new GZIPOutputStream(byteOutput)
try {
gzip.write(Array[Byte](1, 2, 3, 4))
} finally {
gzip.close()
}
val bytes = byteOutput.toByteArray
val o = new FileOutputStream(inputFile)
try {
        // It's corrupt since we only write half of the bytes into the file.
o.write(bytes.take(bytes.length / 2))
} finally {
o.close()
}
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "false") {
val e = intercept[SparkException] {
spark.read.csv(inputFile.toURI.toString).collect()
}
assert(e.getCause.isInstanceOf[EOFException])
assert(e.getCause.getMessage === "Unexpected end of input stream")
}
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "true") {
assert(spark.read.csv(inputFile.toURI.toString).collect().isEmpty)
}
} finally {
inputFile.delete()
}
}
test("SPARK-19610: Parse normal multi-line CSV files") {
val primitiveFieldAndType = Seq(
""""
|string","integer
|
|
|","long
|
|","bigInteger",double,boolean,null""".stripMargin,
""""this is a
|simple
|string.","
|
|10","
|21474836470","92233720368547758070","
|
|1.7976931348623157E308",true,""".stripMargin)
withTempPath { path =>
primitiveFieldAndType.toDF("value").coalesce(1).write.text(path.getAbsolutePath)
val df = spark.read
.option("header", true)
.option("multiLine", true)
.csv(path.getAbsolutePath)
// Check if headers have new lines in the names.
val actualFields = df.schema.fieldNames.toSeq
val expectedFields =
Seq("\\nstring", "integer\\n\\n\\n", "long\\n\\n", "bigInteger", "double", "boolean", "null")
assert(actualFields === expectedFields)
// Check if the rows have new lines in the values.
val expected = Row(
"this is a\\nsimple\\nstring.",
"\\n\\n10",
"\\n21474836470",
"92233720368547758070",
"\\n\\n1.7976931348623157E308",
"true",
null)
checkAnswer(df, expected)
}
}
test("Empty file produces empty dataframe with empty schema") {
Seq(false, true).foreach { multiLine =>
val df = spark.read.format("csv")
.option("header", true)
.option("multiLine", multiLine)
.load(testFile(emptyFile))
assert(df.schema === spark.emptyDataFrame.schema)
checkAnswer(df, spark.emptyDataFrame)
}
}
test("Empty string dataset produces empty dataframe and keep user-defined schema") {
val df1 = spark.read.csv(spark.emptyDataset[String])
assert(df1.schema === spark.emptyDataFrame.schema)
checkAnswer(df1, spark.emptyDataFrame)
val schema = StructType(StructField("a", StringType) :: Nil)
val df2 = spark.read.schema(schema).csv(spark.emptyDataset[String])
assert(df2.schema === schema)
}
test("ignoreLeadingWhiteSpace and ignoreTrailingWhiteSpace options - read") {
val input = " a,b , c "
    // For reading, the defaults of both `ignoreLeadingWhiteSpace` and `ignoreTrailingWhiteSpace`
    // are `false`, so that combination is excluded below.
val combinations = Seq(
(true, true),
(false, true),
(true, false))
    // Check that read rows ignore whitespace as configured.
val expectedRows = Seq(
Row("a", "b", "c"),
Row(" a", "b", " c"),
Row("a", "b ", "c "))
combinations.zip(expectedRows)
.foreach { case ((ignoreLeadingWhiteSpace, ignoreTrailingWhiteSpace), expected) =>
val df = spark.read
.option("ignoreLeadingWhiteSpace", ignoreLeadingWhiteSpace)
.option("ignoreTrailingWhiteSpace", ignoreTrailingWhiteSpace)
.csv(Seq(input).toDS())
checkAnswer(df, expected)
}
}
test("SPARK-18579: ignoreLeadingWhiteSpace and ignoreTrailingWhiteSpace options - write") {
val df = Seq((" a", "b ", " c ")).toDF()
    // For writing, the defaults of both `ignoreLeadingWhiteSpace` and `ignoreTrailingWhiteSpace`
    // are `true`, so that combination is excluded below.
val combinations = Seq(
(false, false),
(false, true),
(true, false))
    // Check that written lines ignore whitespace as configured.
val expectedLines = Seq(
" a,b , c ",
" a,b, c",
"a,b ,c ")
combinations.zip(expectedLines)
.foreach { case ((ignoreLeadingWhiteSpace, ignoreTrailingWhiteSpace), expected) =>
withTempPath { path =>
df.write
.option("ignoreLeadingWhiteSpace", ignoreLeadingWhiteSpace)
.option("ignoreTrailingWhiteSpace", ignoreTrailingWhiteSpace)
.csv(path.getAbsolutePath)
// Read back the written lines.
val readBack = spark.read.text(path.getAbsolutePath)
checkAnswer(readBack, Row(expected))
}
}
}
test("SPARK-21263: Invalid float and double are handled correctly in different modes") {
val exception = intercept[SparkException] {
spark.read.schema("a DOUBLE")
.option("mode", "FAILFAST")
.csv(Seq("10u12").toDS())
.collect()
}
assert(exception.getMessage.contains("""input string: "10u12""""))
val count = spark.read.schema("a FLOAT")
.option("mode", "DROPMALFORMED")
.csv(Seq("10u12").toDS())
.count()
assert(count == 0)
val results = spark.read.schema("a FLOAT")
.option("mode", "PERMISSIVE")
.csv(Seq("10u12").toDS())
checkAnswer(results, Row(null))
}
test("SPARK-20978: Fill the malformed column when the number of tokens is less than schema") {
val df = spark.read
.schema("a string, b string, unparsed string")
.option("columnNameOfCorruptRecord", "unparsed")
.csv(Seq("a").toDS())
checkAnswer(df, Row("a", null, "a"))
}
test("SPARK-21610: Corrupt records are not handled properly when creating a dataframe " +
"from a file") {
val columnNameOfCorruptRecord = "_corrupt_record"
val schema = new StructType()
.add("a", IntegerType)
.add("b", DateType)
.add(columnNameOfCorruptRecord, StringType)
// negative cases
val msg = intercept[AnalysisException] {
spark
.read
.option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
.schema(schema)
.csv(testFile(valueMalformedFile))
.select(columnNameOfCorruptRecord)
.collect()
}.getMessage
assert(msg.contains("only include the internal corrupt record column"))
    // Workaround: cache the DataFrame so the whole row, including the corrupt-record column,
    // is materialized before any projection is applied.
val df = spark
.read
.option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
.schema(schema)
.csv(testFile(valueMalformedFile))
.cache()
assert(df.filter($"_corrupt_record".isNotNull).count() == 1)
assert(df.filter($"_corrupt_record".isNull).count() == 1)
checkAnswer(
df.select(columnNameOfCorruptRecord),
Row("0,2013-111-11 12:13:14") :: Row(null) :: Nil
)
}
test("SPARK-23846: schema inferring touches less data if samplingRatio < 1.0") {
// Set default values for the DataSource parameters to make sure
    // that the whole test file is mapped to only one partition. This will guarantee
// reliable sampling of the input file.
withSQLConf(
SQLConf.FILES_MAX_PARTITION_BYTES.key -> (128 * 1024 * 1024).toString,
SQLConf.FILES_OPEN_COST_IN_BYTES.key -> (4 * 1024 * 1024).toString
)(withTempPath { path =>
val ds = sampledTestData.coalesce(1)
ds.write.text(path.getAbsolutePath)
val readback = spark.read
.option("inferSchema", true).option("samplingRatio", 0.1)
.csv(path.getCanonicalPath)
assert(readback.schema == new StructType().add("_c0", IntegerType))
})
}
test("SPARK-23846: usage of samplingRatio while parsing a dataset of strings") {
val ds = sampledTestData.coalesce(1)
val readback = spark.read
.option("inferSchema", true).option("samplingRatio", 0.1)
.csv(ds)
assert(readback.schema == new StructType().add("_c0", IntegerType))
}
test("SPARK-23846: samplingRatio is out of the range (0, 1.0]") {
val ds = spark.range(0, 100, 1, 1).map(_.toString)
val errorMsg0 = intercept[IllegalArgumentException] {
spark.read.option("inferSchema", true).option("samplingRatio", -1).csv(ds)
}.getMessage
assert(errorMsg0.contains("samplingRatio (-1.0) should be greater than 0"))
val errorMsg1 = intercept[IllegalArgumentException] {
spark.read.option("inferSchema", true).option("samplingRatio", 0).csv(ds)
}.getMessage
assert(errorMsg1.contains("samplingRatio (0.0) should be greater than 0"))
val sampled = spark.read.option("inferSchema", true).option("samplingRatio", 1.0).csv(ds)
assert(sampled.count() == ds.count())
}
test("SPARK-17916: An empty string should not be coerced to null when nullValue is passed.") {
val litNull: String = null
val df = Seq(
(1, "John Doe"),
(2, ""),
(3, "-"),
(4, litNull)
).toDF("id", "name")
// Checks for new behavior where an empty string is not coerced to null when `nullValue` is
// set to anything but an empty string literal.
withTempPath { path =>
df.write
.option("nullValue", "-")
.csv(path.getAbsolutePath)
val computed = spark.read
.option("nullValue", "-")
.schema(df.schema)
.csv(path.getAbsolutePath)
val expected = Seq(
(1, "John Doe"),
(2, ""),
(3, litNull),
(4, litNull)
).toDF("id", "name")
checkAnswer(computed, expected)
}
    // Keeps the old behavior: an empty string is coerced to null when `nullValue` is not passed.
withTempPath { path =>
df.write
.csv(path.getAbsolutePath)
val computed = spark.read
.schema(df.schema)
.csv(path.getAbsolutePath)
val expected = Seq(
(1, "John Doe"),
(2, litNull),
(3, "-"),
(4, litNull)
).toDF("id", "name")
checkAnswer(computed, expected)
}
}
test("SPARK-25241: An empty string should not be coerced to null when emptyValue is passed.") {
val litNull: String = null
val df = Seq(
(1, "John Doe"),
(2, ""),
(3, "-"),
(4, litNull)
).toDF("id", "name")
// Checks for new behavior where a null is not coerced to an empty string when `emptyValue` is
// set to anything but an empty string literal.
withTempPath { path =>
df.write
.option("emptyValue", "-")
.csv(path.getAbsolutePath)
val computed = spark.read
.option("emptyValue", "-")
.schema(df.schema)
.csv(path.getAbsolutePath)
val expected = Seq(
(1, "John Doe"),
(2, "-"),
(3, "-"),
(4, "-")
).toDF("id", "name")
checkAnswer(computed, expected)
}
    // Keeps the old behavior: an empty string is coerced to null when `emptyValue` is not passed.
withTempPath { path =>
df.write
.csv(path.getAbsolutePath)
val computed = spark.read
.schema(df.schema)
.csv(path.getAbsolutePath)
val expected = Seq(
(1, "John Doe"),
(2, litNull),
(3, "-"),
(4, litNull)
).toDF("id", "name")
checkAnswer(computed, expected)
}
}
test("SPARK-24329: skip lines with comments, and one or multiple whitespaces") {
val schema = new StructType().add("colA", StringType)
val ds = spark
.read
.schema(schema)
.option("multiLine", false)
.option("header", true)
.option("comment", "#")
.option("ignoreLeadingWhiteSpace", false)
.option("ignoreTrailingWhiteSpace", false)
.csv(testFile("test-data/comments-whitespaces.csv"))
checkAnswer(ds, Seq(Row(""" "a" """)))
}
test("SPARK-24244: Select a subset of all columns") {
withTempPath { path =>
import collection.JavaConverters._
val schema = new StructType()
.add("f1", IntegerType).add("f2", IntegerType).add("f3", IntegerType)
.add("f4", IntegerType).add("f5", IntegerType).add("f6", IntegerType)
.add("f7", IntegerType).add("f8", IntegerType).add("f9", IntegerType)
.add("f10", IntegerType).add("f11", IntegerType).add("f12", IntegerType)
.add("f13", IntegerType).add("f14", IntegerType).add("f15", IntegerType)
val odf = spark.createDataFrame(List(
Row(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15),
Row(-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15)
).asJava, schema)
odf.write.csv(path.getCanonicalPath)
val idf = spark.read
.schema(schema)
.csv(path.getCanonicalPath)
.select('f15, 'f10, 'f5)
assert(idf.count() == 2)
checkAnswer(idf, List(Row(15, 10, 5), Row(-15, -10, -5)))
}
}
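  // Shared helper for the SPARK-23786 tests below: writes a one-row CSV with header "f1,f2"
  // and verifies that, with enforceSchema disabled, reads fail when the user-specified schema
  // differs from that header in column names, arity, or case.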
def checkHeader(multiLine: Boolean): Unit = {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
withTempPath { path =>
val oschema = new StructType().add("f1", DoubleType).add("f2", DoubleType)
val odf = spark.createDataFrame(List(Row(1.0, 1234.5)).asJava, oschema)
odf.write.option("header", true).csv(path.getCanonicalPath)
val ischema = new StructType().add("f2", DoubleType).add("f1", DoubleType)
val exception = intercept[SparkException] {
spark.read
.schema(ischema)
.option("multiLine", multiLine)
.option("header", true)
.option("enforceSchema", false)
.csv(path.getCanonicalPath)
.collect()
}
assert(exception.getMessage.contains("CSV header does not conform to the schema"))
val shortSchema = new StructType().add("f1", DoubleType)
val exceptionForShortSchema = intercept[SparkException] {
spark.read
.schema(shortSchema)
.option("multiLine", multiLine)
.option("header", true)
.option("enforceSchema", false)
.csv(path.getCanonicalPath)
.collect()
}
assert(exceptionForShortSchema.getMessage.contains(
"Number of column in CSV header is not equal to number of fields in the schema"))
val longSchema = new StructType()
.add("f1", DoubleType)
.add("f2", DoubleType)
.add("f3", DoubleType)
val exceptionForLongSchema = intercept[SparkException] {
spark.read
.schema(longSchema)
.option("multiLine", multiLine)
.option("header", true)
.option("enforceSchema", false)
.csv(path.getCanonicalPath)
.collect()
}
assert(exceptionForLongSchema.getMessage.contains("Header length: 2, schema size: 3"))
val caseSensitiveSchema = new StructType().add("F1", DoubleType).add("f2", DoubleType)
val caseSensitiveException = intercept[SparkException] {
spark.read
.schema(caseSensitiveSchema)
.option("multiLine", multiLine)
.option("header", true)
.option("enforceSchema", false)
.csv(path.getCanonicalPath)
.collect()
}
assert(caseSensitiveException.getMessage.contains(
"CSV header does not conform to the schema"))
}
}
}
test(s"SPARK-23786: Checking column names against schema in the multiline mode") {
checkHeader(multiLine = true)
}
test(s"SPARK-23786: Checking column names against schema in the per-line mode") {
checkHeader(multiLine = false)
}
test("SPARK-23786: CSV header must not be checked if it doesn't exist") {
withTempPath { path =>
val oschema = new StructType().add("f1", DoubleType).add("f2", DoubleType)
val odf = spark.createDataFrame(List(Row(1.0, 1234.5)).asJava, oschema)
odf.write.option("header", false).csv(path.getCanonicalPath)
val ischema = new StructType().add("f2", DoubleType).add("f1", DoubleType)
val idf = spark.read
.schema(ischema)
.option("header", false)
.option("enforceSchema", false)
.csv(path.getCanonicalPath)
checkAnswer(idf, odf)
}
}
test("SPARK-23786: Ignore column name case if spark.sql.caseSensitive is false") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
withTempPath { path =>
val oschema = new StructType().add("A", StringType)
val odf = spark.createDataFrame(List(Row("0")).asJava, oschema)
odf.write.option("header", true).csv(path.getCanonicalPath)
val ischema = new StructType().add("a", StringType)
val idf = spark.read.schema(ischema)
.option("header", true)
.option("enforceSchema", false)
.csv(path.getCanonicalPath)
checkAnswer(idf, odf)
}
}
}
test("SPARK-23786: check header on parsing of dataset of strings") {
val ds = Seq("columnA,columnB", "1.0,1000.0").toDS()
val ischema = new StructType().add("columnB", DoubleType).add("columnA", DoubleType)
val exception = intercept[IllegalArgumentException] {
spark.read.schema(ischema).option("header", true).option("enforceSchema", false).csv(ds)
}
assert(exception.getMessage.contains("CSV header does not conform to the schema"))
}
test("SPARK-23786: enforce inferred schema") {
val expectedSchema = new StructType().add("_c0", DoubleType).add("_c1", StringType)
val withHeader = spark.read
.option("inferSchema", true)
.option("enforceSchema", false)
.option("header", true)
.csv(Seq("_c0,_c1", "1.0,a").toDS())
assert(withHeader.schema == expectedSchema)
checkAnswer(withHeader, Seq(Row(1.0, "a")))
    // Ignore the inferSchema flag if a user sets a schema
val schema = new StructType().add("colA", DoubleType).add("colB", StringType)
val ds = spark.read
.option("inferSchema", true)
.option("enforceSchema", false)
.option("header", true)
.schema(schema)
.csv(Seq("colA,colB", "1.0,a").toDS())
assert(ds.schema == schema)
checkAnswer(ds, Seq(Row(1.0, "a")))
val exception = intercept[IllegalArgumentException] {
spark.read
.option("inferSchema", true)
.option("enforceSchema", false)
.option("header", true)
.schema(schema)
.csv(Seq("col1,col2", "1.0,a").toDS())
}
assert(exception.getMessage.contains("CSV header does not conform to the schema"))
}
test("SPARK-23786: warning should be printed if CSV header doesn't conform to schema") {
class TestAppender extends AppenderSkeleton {
var events = new java.util.ArrayList[LoggingEvent]
override def close(): Unit = {}
override def requiresLayout: Boolean = false
protected def append(event: LoggingEvent): Unit = events.add(event)
}
val testAppender1 = new TestAppender
withLogAppender(testAppender1) {
val ds = Seq("columnA,columnB", "1.0,1000.0").toDS()
val ischema = new StructType().add("columnB", DoubleType).add("columnA", DoubleType)
spark.read.schema(ischema).option("header", true).option("enforceSchema", true).csv(ds)
}
assert(testAppender1.events.asScala
.exists(msg => msg.getRenderedMessage.contains("CSV header does not conform to the schema")))
val testAppender2 = new TestAppender
withLogAppender(testAppender2) {
withTempPath { path =>
val oschema = new StructType().add("f1", DoubleType).add("f2", DoubleType)
val odf = spark.createDataFrame(List(Row(1.0, 1234.5)).asJava, oschema)
odf.write.option("header", true).csv(path.getCanonicalPath)
val ischema = new StructType().add("f2", DoubleType).add("f1", DoubleType)
spark.read
.schema(ischema)
.option("header", true)
.option("enforceSchema", true)
.csv(path.getCanonicalPath)
.collect()
}
}
assert(testAppender2.events.asScala
.exists(msg => msg.getRenderedMessage.contains("CSV header does not conform to the schema")))
}
test("SPARK-25134: check header on parsing of dataset with projection and column pruning") {
withSQLConf(SQLConf.CSV_PARSER_COLUMN_PRUNING.key -> "true") {
Seq(false, true).foreach { multiLine =>
withTempPath { path =>
val dir = path.getAbsolutePath
Seq(("a", "b")).toDF("columnA", "columnB").write
.format("csv")
.option("header", true)
.save(dir)
// schema with one column
checkAnswer(spark.read
.format("csv")
.option("header", true)
.option("enforceSchema", false)
.option("multiLine", multiLine)
.load(dir)
.select("columnA"),
Row("a"))
// empty schema
assert(spark.read
.format("csv")
.option("header", true)
.option("enforceSchema", false)
.option("multiLine", multiLine)
.load(dir)
.count() === 1L)
}
}
}
}
test("SPARK-24645 skip parsing when columnPruning enabled and partitions scanned only") {
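    // Only the partition column `p` is aggregated here, so with CSV column pruning enabled
    // the parser does not need to materialize any data columns from the files.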
withSQLConf(SQLConf.CSV_PARSER_COLUMN_PRUNING.key -> "true") {
withTempPath { path =>
val dir = path.getAbsolutePath
spark.range(10).selectExpr("id % 2 AS p", "id").write.partitionBy("p").csv(dir)
checkAnswer(spark.read.csv(dir).selectExpr("sum(p)"), Row(5))
}
}
}
test("SPARK-24676 project required data from parsed data when columnPruning disabled") {
withSQLConf(SQLConf.CSV_PARSER_COLUMN_PRUNING.key -> "false") {
withTempPath { path =>
val dir = path.getAbsolutePath
spark.range(10).selectExpr("id % 2 AS p", "id AS c0", "id AS c1").write.partitionBy("p")
.option("header", "true").csv(dir)
val df1 = spark.read.option("header", true).csv(dir).selectExpr("sum(p)", "count(c0)")
checkAnswer(df1, Row(5, 10))
// empty required column case
val df2 = spark.read.option("header", true).csv(dir).selectExpr("sum(p)")
checkAnswer(df2, Row(5))
}
// the case where tokens length != parsedSchema length
withTempPath { path =>
val dir = path.getAbsolutePath
Seq("1,2").toDF().write.text(dir)
// more tokens
val df1 = spark.read.schema("c0 int").format("csv").option("mode", "permissive").load(dir)
checkAnswer(df1, Row(1))
// less tokens
val df2 = spark.read.schema("c0 int, c1 int, c2 int").format("csv")
.option("mode", "permissive").load(dir)
checkAnswer(df2, Row(1, 2, null))
}
}
}
test("count() for malformed input") {
def countForMalformedCSV(expected: Long, input: Seq[String]): Unit = {
val schema = new StructType().add("a", IntegerType)
val strings = spark.createDataset(input)
val df = spark.read.schema(schema).option("header", false).csv(strings)
assert(df.count() == expected)
}
def checkCount(expected: Long): Unit = {
val validRec = "1"
val inputs = Seq(
Seq("{-}", validRec),
Seq(validRec, "?"),
Seq("0xAC", validRec),
Seq(validRec, "0.314"),
Seq("\\\\\\\\\\\\", validRec)
)
inputs.foreach { input =>
countForMalformedCSV(expected, input)
}
}
checkCount(2)
countForMalformedCSV(0, Seq(""))
}
test("SPARK-25387: bad input should not cause NPE") {
val schema = StructType(StructField("a", IntegerType) :: Nil)
val input = spark.createDataset(Seq("\\u0000\\u0000\\u0001234"))
checkAnswer(spark.read.schema(schema).csv(input), Row(null))
checkAnswer(spark.read.option("multiLine", true).schema(schema).csv(input), Row(null))
assert(spark.read.csv(input).collect().toSet == Set(Row()))
}
test("field names of inferred schema shouldn't compare to the first row") {
val input = Seq("1,2").toDS()
val df = spark.read.option("enforceSchema", false).csv(input)
checkAnswer(df, Row("1", "2"))
}
test("using the backward slash as the delimiter") {
val input = Seq("""abc\\1""").toDS()
val delimiter = """\\\\"""
checkAnswer(spark.read.option("delimiter", delimiter).csv(input), Row("abc", "1"))
checkAnswer(spark.read.option("inferSchema", true).option("delimiter", delimiter).csv(input),
Row("abc", 1))
val schema = new StructType().add("a", StringType).add("b", IntegerType)
checkAnswer(spark.read.schema(schema).option("delimiter", delimiter).csv(input), Row("abc", 1))
}
test("using spark.sql.columnNameOfCorruptRecord") {
withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
val csv = "\\""
val df = spark.read
.schema("a int, _unparsed string")
.csv(Seq(csv).toDS())
checkAnswer(df, Row(null, csv))
}
}
test("encoding in multiLine mode") {
val df = spark.range(3).toDF()
Seq("UTF-8", "ISO-8859-1", "CP1251", "US-ASCII", "UTF-16BE", "UTF-32LE").foreach { encoding =>
Seq(true, false).foreach { header =>
withTempPath { path =>
df.write
.option("encoding", encoding)
.option("header", header)
.csv(path.getCanonicalPath)
val readback = spark.read
.option("multiLine", true)
.option("encoding", encoding)
.option("inferSchema", true)
.option("header", header)
.csv(path.getCanonicalPath)
checkAnswer(readback, df)
}
}
}
}
test("""Support line separator - default value \\r, \\r\\n and \\n""") {
val data = "\\"a\\",1\\r\\"c\\",2\\r\\n\\"d\\",3\\n"
withTempPath { path =>
Files.write(path.toPath, data.getBytes(StandardCharsets.UTF_8))
val df = spark.read.option("inferSchema", true).csv(path.getAbsolutePath)
val expectedSchema =
StructType(StructField("_c0", StringType) :: StructField("_c1", IntegerType) :: Nil)
checkAnswer(df, Seq(("a", 1), ("c", 2), ("d", 3)).toDF())
assert(df.schema === expectedSchema)
}
}
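  // Parameterized line-separator check: for the given separator and encoding it verifies reads
  // (with and without a trailing separator, in both multiLine modes), the raw bytes produced
  // on write, and a full write/read roundtrip.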
def testLineSeparator(lineSep: String, encoding: String, inferSchema: Boolean, id: Int): Unit = {
test(s"Support line separator in ${encoding} #${id}") {
// Read
val data =
s""""a",1$lineSep
|c,2$lineSep"
|d",3""".stripMargin
val dataWithTrailingLineSep = s"$data$lineSep"
Seq(data, dataWithTrailingLineSep).foreach { lines =>
withTempPath { path =>
Files.write(path.toPath, lines.getBytes(encoding))
val schema = StructType(StructField("_c0", StringType)
:: StructField("_c1", LongType) :: Nil)
val expected = Seq(("a", 1), ("\\nc", 2), ("\\nd", 3))
.toDF("_c0", "_c1")
Seq(false, true).foreach { multiLine =>
val reader = spark
.read
.option("lineSep", lineSep)
.option("multiLine", multiLine)
.option("encoding", encoding)
val df = if (inferSchema) {
reader.option("inferSchema", true).csv(path.getAbsolutePath)
} else {
reader.schema(schema).csv(path.getAbsolutePath)
}
checkAnswer(df, expected)
}
}
}
// Write
withTempPath { path =>
Seq("a", "b", "c").toDF("value").coalesce(1)
.write
.option("lineSep", lineSep)
.option("encoding", encoding)
.csv(path.getAbsolutePath)
val partFile = TestUtils.recursiveList(path).filter(f => f.getName.startsWith("part-")).head
val readBack = new String(Files.readAllBytes(partFile.toPath), encoding)
assert(
readBack === s"a${lineSep}b${lineSep}c${lineSep}")
}
// Roundtrip
withTempPath { path =>
val df = Seq("a", "b", "c").toDF()
df.write
.option("lineSep", lineSep)
.option("encoding", encoding)
.csv(path.getAbsolutePath)
val readBack = spark
.read
.option("lineSep", lineSep)
.option("encoding", encoding)
.csv(path.getAbsolutePath)
checkAnswer(df, readBack)
}
}
}
// scalastyle:off nonascii
List(
(0, "|", "UTF-8", false),
(1, "^", "UTF-16BE", true),
(2, ":", "ISO-8859-1", true),
(3, "!", "UTF-32LE", false),
(4, 0x1E.toChar.toString, "UTF-8", true),
(5, "아", "UTF-32BE", false),
(6, "у", "CP1251", true),
(8, "\\r", "UTF-16LE", true),
(9, "\\u000d", "UTF-32BE", false),
(10, "=", "US-ASCII", false),
(11, "$", "utf-32le", true)
).foreach { case (testNum, sep, encoding, inferSchema) =>
testLineSeparator(sep, encoding, inferSchema, testNum)
}
// scalastyle:on nonascii
test("lineSep restrictions") {
val errMsg1 = intercept[IllegalArgumentException] {
spark.read.option("lineSep", "").csv(testFile(carsFile)).collect
}.getMessage
assert(errMsg1.contains("'lineSep' cannot be an empty string"))
val errMsg2 = intercept[IllegalArgumentException] {
spark.read.option("lineSep", "123").csv(testFile(carsFile)).collect
}.getMessage
assert(errMsg2.contains("'lineSep' can contain only 1 character"))
}
test("SPARK-26208: write and read empty data to csv file with headers") {
withTempPath { path =>
val df1 = spark.range(10).repartition(2).filter(_ < 0).map(_.toString).toDF
      // We have 2 partitions, but they are both empty and will be filtered out upon writing;
      // thanks to SPARK-23271 one new empty partition will be inserted.
df1.write.format("csv").option("header", true).save(path.getAbsolutePath)
val df2 = spark.read.format("csv").option("header", true).option("inferSchema", false)
.load(path.getAbsolutePath)
assert(df1.schema === df2.schema)
checkAnswer(df1, df2)
}
}
test("do not produce empty files for empty partitions") {
withTempPath { dir =>
val path = dir.getCanonicalPath
spark.emptyDataset[String].write.csv(path)
val files = new File(path).listFiles()
assert(!files.exists(_.getName.endsWith("csv")))
}
}
test("Do not reuse last good value for bad input field") {
val schema = StructType(
StructField("col1", StringType) ::
StructField("col2", DateType) ::
Nil
)
val rows = spark.read
.schema(schema)
.format("csv")
.load(testFile(badAfterGoodFile))
val expectedRows = Seq(
Row("good record", java.sql.Date.valueOf("1999-08-01")),
Row("bad record", null))
checkAnswer(rows, expectedRows)
}
test("SPARK-27512: Decimal type inference should not handle ',' for backward compatibility") {
assert(spark.read
.option("delimiter", "|")
.option("inferSchema", "true")
.csv(Seq("1,2").toDS).schema.head.dataType === StringType)
}
test("SPARK-27873: disabling enforceSchema should not fail columnNameOfCorruptRecord") {
Seq("csv", "").foreach { reader =>
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> reader) {
withTempPath { path =>
val df = Seq(("0", "2013-111-11")).toDF("a", "b")
df.write
.option("header", "true")
.csv(path.getAbsolutePath)
val schema = StructType.fromDDL("a int, b date")
val columnNameOfCorruptRecord = "_unparsed"
val schemaWithCorrField = schema.add(columnNameOfCorruptRecord, StringType)
val readDF = spark
.read
.option("mode", "Permissive")
.option("header", "true")
.option("enforceSchema", false)
.option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
.schema(schemaWithCorrField)
.csv(path.getAbsoluteFile.toString)
checkAnswer(readDF, Row(0, null, "0,2013-111-11") :: Nil)
}
}
}
}
test("SPARK-28431: prevent CSV datasource throw TextParsingException with large size message") {
withTempPath { path =>
val maxCharsPerCol = 10000
val str = "a" * (maxCharsPerCol + 1)
Files.write(
path.toPath,
str.getBytes(StandardCharsets.UTF_8),
StandardOpenOption.CREATE, StandardOpenOption.WRITE
)
val errMsg = intercept[TextParsingException] {
spark.read
.option("maxCharsPerColumn", maxCharsPerCol)
.csv(path.getAbsolutePath)
.count()
}.getMessage
      assert(errMsg.contains("..."),
        "expected the TextParsingException to truncate the error content to 1000 characters.")
}
}
test("SPARK-29101 test count with DROPMALFORMED mode") {
Seq((true, 4), (false, 3)).foreach { case (csvColumnPruning, expectedCount) =>
withSQLConf(SQLConf.CSV_PARSER_COLUMN_PRUNING.key -> csvColumnPruning.toString) {
val count = spark.read
.option("header", "true")
.option("mode", "DROPMALFORMED")
.csv(testFile(malformedRowFile))
.count()
assert(expectedCount == count)
}
}
}
}
| caneGuy/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala | Scala | apache-2.0 | 73,875 |
/*******************************************************************************
* Copyright (c) 2014 Guillaume DUBUISSON DUPLESSIS <[email protected]>.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the GNU Public License v3.0
* which accompanies this distribution, and is available at
* http://www.gnu.org/licenses/gpl.html
*
* Contributors:
* Guillaume DUBUISSON DUPLESSIS <[email protected]> - initial API and implementation
******************************************************************************/
package logicAndCodes.P49
import util.ExerciseTemplate
trait P49 extends ExerciseTemplate {
/*
P49 (**) Gray code.
An n-bit Gray code is a sequence of n-bit strings constructed according to certain rules. For example,
n = 1: C(1) = ("0", "1").
n = 2: C(2) = ("00", "01", "11", "10").
n = 3: C(3) = ("000", "001", "011", "010", "110", "111", "101", "100").
Find out the construction rules and write a function to generate Gray codes.
scala> gray(3)
res0 List[String] = List(000, 001, 011, 010, 110, 111, 101, 100)
See if you can use memoization to make the function more efficient.
*/
val name = "P49 (Gray code)"
def gray(l: Int): List[String]
test("Invoking gray with l < 0 should through an IllegalArgumentException") {
intercept[IllegalArgumentException] {
gray(-42)
}
intercept[IllegalArgumentException] {
gray(-1)
}
}
test("Invoking gray with l >= 0 should return the sequence of n-bit strings") {
assert(gray(0) == List())
assert(gray(1) == List("0", "1"))
assert(gray(2) == List("00", "01", "11", "10"))
assert(gray(3) == List("000", "001", "011", "010", "110", "111", "101", "100"))
}
}
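// Illustrative sketch (not part of the original exercise sources): one possible implementation
// of `gray` that satisfies the tests above, using the reflect-and-prefix construction with a
// small memo table. The object name is an assumption made for this example only.
object GrayCodeSketch {
  private val memo = scala.collection.mutable.Map.empty[Int, List[String]]

  def gray(l: Int): List[String] = {
    require(l >= 0, "l must be non-negative")
    if (l == 0) List() // the tests above expect gray(0) == List()
    else if (l == 1) List("0", "1")
    else memo.getOrElseUpdate(l, {
      // C(n) is "0" prefixed onto C(n-1), followed by "1" prefixed onto C(n-1) reversed.
      val previous = gray(l - 1)
      previous.map("0" + _) ::: previous.reverse.map("1" + _)
    })
  }
}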
| GuillaumeDD/scala99problems | src/main/scala/logicAndCodes/P49/P49.scala | Scala | gpl-3.0 | 1,830 |
package net.liftmodules.ng
package comet
import Angular.{FutureId, ReturnData}
import net.liftweb._
import http._
import common._
import js._
import JE._
import JsCmds._
import net.liftweb.json.{Formats, JsonAST}
import scala.xml.NodeSeq
class LiftNgFutureActor extends CometActor {
def render = NodeSeq.Empty
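  // Resolves a future on the client: maps the Box to an Angular promise, renders it as JSON,
  // and pushes a partialUpdate that hands the payload to the client-side
  // net_liftmodules_ng.processComet handler, keyed by the future id.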
def callback[T <: Any](id: FutureId, box: => Box[T], formats: Formats) = partialUpdate {
val promise = Angular.DefaultApiSuccessMapper.boxToPromise(box)(formats)
val response = JsonAST.compactRender(promiseToJson(promise))
val js = s"""net_liftmodules_ng.processComet("${Angular.appSelector}",$response,"$id");"""
JsRaw(js)
}
override def lowPriority = {
case ReturnData(id, box, formats) => callback(id, box, formats)
}
}
| joescii/lift-ng | src/main/scala/net/liftmodules/ng/FutureActor.scala | Scala | apache-2.0 | 768 |
/*
* Copyright 2014 Kevin Herron
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.digitalpetri.ethernetip.cip
object CipServiceCodes {
val GetAttributesAll = 0x01
val SetAttributesAll = 0x02
val GetAttributeList = 0x03
val SetAttributeList = 0x04
val Reset = 0x05
val Start = 0x06
val Stop = 0x07
val Create = 0x08
val Delete = 0x09
val MultipleServicePacket = 0x0A
val ApplyAttributes = 0x0D
val GetAttributeSingle = 0x0E
val SetAttributeSingle = 0x10
val FindNextObjectInstance = 0x011
val Restore = 0x15
val Save = 0x16
val Nop = 0x17
val GetMember = 0x18
val SetMember = 0x19
val InsertMember = 0x1A
val RemoveMember = 0x1B
val GroupSync = 0x1C
}
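// Illustrative sketch (not part of the original sources): a small helper mapping a few of the
// codes above back to readable names, e.g. for logging unexpected CIP services. The object
// name and the selection of codes are assumptions made for this example only.
object CipServiceCodeNames {
  def nameOf(code: Int): String = code match {
    case CipServiceCodes.GetAttributesAll      => "GetAttributesAll"
    case CipServiceCodes.GetAttributeSingle    => "GetAttributeSingle"
    case CipServiceCodes.SetAttributeSingle    => "SetAttributeSingle"
    case CipServiceCodes.MultipleServicePacket => "MultipleServicePacket"
    case other                                 => f"Unknown(0x$other%02X)"
  }
}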
| digitalpetri/scala-ethernet-ip | enip-core/src/main/scala/com/digitalpetri/ethernetip/cip/CipServiceCodes.scala | Scala | apache-2.0 | 1,224 |
package com.productfoundry.akka.cqrs.project
import com.productfoundry.akka.cqrs.publish.EventSubscriber
/**
* Projects events onto a projection.
*/
trait Projector extends EventSubscriber with ProjectionUpdateHandler {
/**
* Uniquely identifies a projection created by the projector.
*/
def projectionId: String
/**
* Default receive behavior.
*/
override def receive: Receive = receivePublishedEvent
/**
* Partial function to handle published aggregate event records.
*/
override def eventReceived: ReceiveEventRecord = project
/**
* Projects a single event record.
*/
def project: ReceiveEventRecord
/**
* Handle a projected update.
* @param update to handle.
*/
override def handleProjectedUpdate(update: ProjectionUpdate): Unit = {}
}
| odd/akka-cqrs | core/src/main/scala/com/productfoundry/akka/cqrs/project/Projector.scala | Scala | apache-2.0 | 802 |
package es.um.nosql.streaminginference.spark.input
import scala.collection.JavaConversions.asScalaBuffer
import org.apache.spark.internal.Logging
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.receiver.Receiver
import org.ektorp.CouchDbConnector
import org.ektorp.CouchDbInstance
import org.ektorp.changes.ChangesCommand
import org.ektorp.changes.ChangesFeed
import org.ektorp.changes.DocumentChange
import org.ektorp.http.HttpClient
import org.ektorp.http.StdHttpClient
import org.ektorp.impl.StdCouchDbInstance
import com.fasterxml.jackson.databind.node.ObjectNode
import es.um.nosql.streaminginference.json2dbschema.util.inflector.Inflector
class CouchDBReceiver (host: String,
port: Int,
username: Option[String] = None,
password: Option[String] = None)
extends Receiver[((String, String), String)](StorageLevel.MEMORY_AND_DISK_2) with Logging
{
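  // Polls the CouchDB instance every 10 seconds for newly created databases and spawns one
  // watcher thread per database; each changed document is tagged with the singularized
  // entity name and stored as (("couch", entity), json).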
def onStart()
{
new Thread("CouchDB Receiver")
{
override def run()
{
receive()
}
}
.start()
}
private def receive()
{
var currentDbs:List[String] = List()
try
{
// Connect with CouchDB Server
val request:StdHttpClient.Builder = new StdHttpClient
.Builder()
.url("http://" + host + ":" + port)
if (username.isDefined)
request.username(username.get)
if (password.isDefined)
request.password(password.get)
val httpClient:HttpClient = request.build
val dbInstance:CouchDbInstance = new StdCouchDbInstance(httpClient)
while (!isStopped())
{
val dbs:List[String] = dbInstance
.getAllDatabases
.toList
// Filter out internal databases
.filter(db => !db.startsWith("_"))
val newDbs = dbs.diff(currentDbs)
newDbs.foreach(newDb =>
{
currentDbs = newDb :: currentDbs
val th = new Thread("Couch DB Watcher")
{
override def run()
{
val db:CouchDbConnector = dbInstance.createConnector(newDb, true)
val cmd:ChangesCommand = new ChangesCommand
.Builder()
.since(0)
.continuous(true)
.includeDocs(true)
.build()
val feed:ChangesFeed = db.changesFeed(cmd)
val entity = Inflector.getInstance.singularize(newDb)
while (feed.isAlive() && !isStopped)
{
val change:DocumentChange = feed.next();
val node = change.getDocAsNode.asInstanceOf[ObjectNode]
node
.put("_type", entity)
.remove("_rev")
store((("couch", entity), node.toString))
}
}
}.start()
})
        // Wait before polling for new databases again
Thread.sleep(10000)
}
}
catch
{
case e: java.net.ConnectException =>
// restart if could not connect to server
restart("Error connecting to " + host + ":" + port, e)
case t: Throwable =>
// restart if there is any other error
restart("Error receiving data", t)
}
}
def onStop()
{
// There is nothing much to do as the thread calling receive()
// is designed to stop by itself if isStopped() returns false
}
} | catedrasaes-umu/NoSQLDataEngineering | projects/es.um.nosql.streaminginference.json2dbschema/src/es/um/nosql/streaminginference/spark/input/CouchDBReceiver.scala | Scala | mit | 3,857 |
package scopeA {
class ProtectedClass1(protected val protectedField1: Int) {
protected val protectedField2 = 1
def equalFields(other: ProtectedClass1) =
(protectedField1 == other.protectedField1) &&
(protectedField2 == other.protectedField2) &&
(nested == other.nested)
class Nested {
protected val nestedField = 1
}
protected val nested = new Nested
}
  // Derived type
class ProtectedClass2 extends ProtectedClass1(1) {
val field1 = protectedField1
val field2 = protectedField2
    // Error: protected members of the Nested class are not accessible here
// val nField = new Nested().nestedField
}
  // Instance
class ProtectedClass3 {
val protectedClass1 = new ProtectedClass1(1)
    // Errors
// val protectedField1 = protectedClass1.protectedField1
// val protectedField2 = protectedClass1.protectedField2
// val protectedNField = protectedClass1.nested.nestedField
protected class ProtectedClass4
class ProtectedClass5 extends ProtectedClass4
protected class ProtectedClass6 extends ProtectedClass4
}
}
package scopeB {
  // Error
// class ProtectedClass4B extends scopeA.ProtectedClass4
} | younggi/books | programming_scala/progscala2/src/main/scala/progscala2/visibility/protected.scala | Scala | mit | 1,192 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import org.apache.spark.sql.catalyst.analysis.EliminateSubqueryAliases
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.expressions.Add
import org.apache.spark.sql.catalyst.plans.{FullOuter, LeftOuter, PlanTest, RightOuter}
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
class LimitPushdownSuite extends PlanTest {
private object Optimize extends RuleExecutor[LogicalPlan] {
val batches =
Batch("Subqueries", Once,
EliminateSubqueryAliases) ::
Batch("Limit pushdown", FixedPoint(100),
LimitPushDown,
CombineLimits,
ConstantFolding,
BooleanSimplification) :: Nil
}
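  // The folding/simplification batches let the expected plans below be written with literal
  // limits, e.g. the constant-foldable limit expression Add(1, 1) becomes 2 before comparison.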
private val testRelation = LocalRelation('a.int, 'b.int, 'c.int)
private val testRelation2 = LocalRelation('d.int, 'e.int, 'f.int)
private val x = testRelation.subquery('x)
private val y = testRelation.subquery('y)
// Union ---------------------------------------------------------------------------------------
test("Union: limit to each side") {
val unionQuery = Union(testRelation, testRelation2).limit(1)
val unionOptimized = Optimize.execute(unionQuery.analyze)
val unionCorrectAnswer =
Limit(1, Union(LocalLimit(1, testRelation), LocalLimit(1, testRelation2))).analyze
comparePlans(unionOptimized, unionCorrectAnswer)
}
test("Union: limit to each side with constant-foldable limit expressions") {
val unionQuery = Union(testRelation, testRelation2).limit(Add(1, 1))
val unionOptimized = Optimize.execute(unionQuery.analyze)
val unionCorrectAnswer =
Limit(2, Union(LocalLimit(2, testRelation), LocalLimit(2, testRelation2))).analyze
comparePlans(unionOptimized, unionCorrectAnswer)
}
test("Union: limit to each side with the new limit number") {
val unionQuery = Union(testRelation, testRelation2.limit(3)).limit(1)
val unionOptimized = Optimize.execute(unionQuery.analyze)
val unionCorrectAnswer =
Limit(1, Union(LocalLimit(1, testRelation), LocalLimit(1, testRelation2))).analyze
comparePlans(unionOptimized, unionCorrectAnswer)
}
test("Union: no limit to both sides if children having smaller limit values") {
val unionQuery = Union(testRelation.limit(1), testRelation2.select('d).limit(1)).limit(2)
val unionOptimized = Optimize.execute(unionQuery.analyze)
val unionCorrectAnswer =
Limit(2, Union(testRelation.limit(1), testRelation2.select('d).limit(1))).analyze
comparePlans(unionOptimized, unionCorrectAnswer)
}
test("Union: limit to each sides if children having larger limit values") {
val testLimitUnion = Union(testRelation.limit(3), testRelation2.select('d).limit(4))
val unionQuery = testLimitUnion.limit(2)
val unionOptimized = Optimize.execute(unionQuery.analyze)
val unionCorrectAnswer =
Limit(2, Union(LocalLimit(2, testRelation), LocalLimit(2, testRelation2.select('d)))).analyze
comparePlans(unionOptimized, unionCorrectAnswer)
}
// Outer join ----------------------------------------------------------------------------------
test("left outer join") {
val originalQuery = x.join(y, LeftOuter).limit(1)
val optimized = Optimize.execute(originalQuery.analyze)
    val correctAnswer = Limit(1, LocalLimit(1, x).join(y, LeftOuter)).analyze
comparePlans(optimized, correctAnswer)
}
test("right outer join") {
val originalQuery = x.join(y, RightOuter).limit(1)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = Limit(1, x.join(LocalLimit(1, y), RightOuter)).analyze
comparePlans(optimized, correctAnswer)
}
test("larger limits are not pushed on top of smaller ones in right outer join") {
val originalQuery = x.join(y.limit(5), RightOuter).limit(10)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = Limit(10, x.join(Limit(5, y), RightOuter)).analyze
comparePlans(optimized, correctAnswer)
}
test("full outer join where neither side is limited and both sides have same statistics") {
assert(x.statistics.sizeInBytes === y.statistics.sizeInBytes)
val originalQuery = x.join(y, FullOuter).limit(1)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = Limit(1, LocalLimit(1, x).join(y, FullOuter)).analyze
comparePlans(optimized, correctAnswer)
}
test("full outer join where neither side is limited and left side has larger statistics") {
val xBig = testRelation.copy(data = Seq.fill(2)(null)).subquery('x)
assert(xBig.statistics.sizeInBytes > y.statistics.sizeInBytes)
val originalQuery = xBig.join(y, FullOuter).limit(1)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = Limit(1, LocalLimit(1, xBig).join(y, FullOuter)).analyze
comparePlans(optimized, correctAnswer)
}
test("full outer join where neither side is limited and right side has larger statistics") {
val yBig = testRelation.copy(data = Seq.fill(2)(null)).subquery('y)
assert(x.statistics.sizeInBytes < yBig.statistics.sizeInBytes)
val originalQuery = x.join(yBig, FullOuter).limit(1)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = Limit(1, x.join(LocalLimit(1, yBig), FullOuter)).analyze
comparePlans(optimized, correctAnswer)
}
test("full outer join where both sides are limited") {
val originalQuery = x.limit(2).join(y.limit(2), FullOuter).limit(1)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = Limit(1, Limit(2, x).join(Limit(2, y), FullOuter)).analyze
comparePlans(optimized, correctAnswer)
}
}
| Panos-Bletsos/spark-cost-model-optimizer | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/LimitPushdownSuite.scala | Scala | apache-2.0 | 6,578 |
/*
* Scala classfile decoder (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.scalap
package scalax
package rules
package scalasig
trait Flags {
def hasFlag(flag: Long): Boolean
def isImplicit = hasFlag(0x00000001)
def isFinal = hasFlag(0x00000002)
def isPrivate = hasFlag(0x00000004)
def isProtected = hasFlag(0x00000008)
def isSealed = hasFlag(0x00000010)
def isOverride = hasFlag(0x00000020)
def isCase = hasFlag(0x00000040)
def isAbstract = hasFlag(0x00000080)
def isDeferred = hasFlag(0x00000100)
def isMethod = hasFlag(0x00000200)
def isModule = hasFlag(0x00000400)
def isInterface = hasFlag(0x00000800)
def isMutable = hasFlag(0x00001000)
def isParam = hasFlag(0x00002000)
def isPackage = hasFlag(0x00004000)
def isDeprecated = hasFlag(0x00008000)
def isCovariant = hasFlag(0x00010000)
def isCaptured = hasFlag(0x00010000)
def isByNameParam = hasFlag(0x00010000)
def isContravariant = hasFlag(0x00020000)
def isLabel = hasFlag(0x00020000) // method symbol is a label. Set by TailCall
def isInConstructor = hasFlag(0x00020000) // class symbol is defined in this/superclass constructor
def isAbstractOverride = hasFlag(0x00040000)
def isLocal = hasFlag(0x00080000)
def isJava = hasFlag(0x00100000)
def isSynthetic = hasFlag(0x00200000)
def isStable = hasFlag(0x00400000)
def isStatic = hasFlag(0x00800000)
def isCaseAccessor = hasFlag(0x01000000)
def isTrait = hasFlag(0x02000000)
def isBridge = hasFlag(0x04000000)
def isAccessor = hasFlag(0x08000000)
def isSuperAccessor = hasFlag(0x10000000)
def isParamAccessor = hasFlag(0x20000000)
def isModuleVar = hasFlag(0x40000000) // for variables: is the variable caching a module value
def isMonomorphic = hasFlag(0x40000000) // for type symbols: does not have type parameters
def isLazy = hasFlag(0x80000000L) // symbol is a lazy val. can't have MUTABLE unless transformed by typer
def isError = hasFlag(0x100000000L)
def isOverloaded = hasFlag(0x200000000L)
def isLifted = hasFlag(0x400000000L)
def isMixedIn = hasFlag(0x800000000L)
def isExistential = hasFlag(0x800000000L)
def isExpandedName = hasFlag(0x1000000000L)
def isImplementationClass = hasFlag(0x2000000000L)
def isPreSuper = hasFlag(0x2000000000L)
}
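// Illustrative sketch (hypothetical, not from the upstream project): concrete
// scalasig symbols are assumed to satisfy hasFlag by testing a raw bitmask read
// from the pickle; the minimal implementation below shows that wiring and how
// the derived accessors behave. The flag constants reused here are FINAL
// (0x00000002) and METHOD (0x00000200) from the trait above.
object FlagsExample {
  final case class RawFlags(bits: Long) extends Flags {
    // A flag is set when its bit is present in the mask.
    def hasFlag(flag: Long): Boolean = (bits & flag) != 0L
  }
  def main(args: Array[String]): Unit = {
    val f = RawFlags(0x00000002L | 0x00000200L) // FINAL | METHOD
    println(f.isFinal)  // true
    println(f.isMethod) // true
    println(f.isLazy)   // false
  }
}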
| scala/scala | src/scalap/scala/tools/scalap/scalax/rules/scalasig/Flags.scala | Scala | apache-2.0 | 2,523 |
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.accessibility
import java.util
import org.joda.time.DateTime
import org.joda.time.format.DateTimeFormat
import org.openqa.selenium.WebDriver.{Navigation, Options, TargetLocator}
import org.openqa.selenium.firefox.{FirefoxProfile, FirefoxDriver}
import org.openqa.selenium.{JavascriptExecutor, By, WebDriver, WebElement}
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
class AccessibilityDriver(val port: Int = 8080) extends WebDriver with JavascriptExecutor {
//Queue to handle intercepted pages
val interceptedPages = new QueueStream
//Start a thread with the proxy server running
HttpProxyServerFactory.buildHtmlInterceptingProxy(port, interceptedPages.put).start()
//Create a delegate WebDriver
val profile = new FirefoxProfile()
profile.setPreference("network.proxy.type", 1)
profile.setPreference("network.proxy.http", "localhost")
profile.setPreference("network.proxy.http_port", port)
profile.setPreference("network.proxy.no_proxies_on", "")
val delegate = new FirefoxDriver(profile)
val runTime = DateTime.now
val runStamp = DateTimeFormat.forPattern("yyyyMMddHHmmss").print(runTime)
//Run page interceptor in worker thread
Future {
    // Consume intercepted pages until the end-of-stream marker (StopMessage) is received
    val pages = for (page <- interceptedPages) yield {
ReportWriter.createAccessibilityReport(runStamp, page)
page
}
ReportWriter.createReportWrapper(pages.toSet, runStamp, runTime)
}
override def get(url: String): Unit = delegate.get(url)
override def getPageSource: String = delegate.getPageSource
override def findElements(by: By): util.List[WebElement] = delegate.findElements(by)
override def getWindowHandle: String = delegate.getWindowHandle
override def manage(): Options = delegate.manage()
override def getWindowHandles: util.Set[String] = delegate.getWindowHandles
override def switchTo(): TargetLocator = delegate.switchTo()
override def close(): Unit = delegate.close()
override def getCurrentUrl: String = delegate.getCurrentUrl
override def navigate(): Navigation = delegate.navigate()
override def getTitle: String = delegate.getTitle
override def findElement(by: By): WebElement = delegate.findElement(by)
override def quit(): Unit = {
interceptedPages.put(StopMessage)
delegate.quit()
}
  override def executeScript(script: String, args: AnyRef*): AnyRef = delegate.executeScript(script, args: _*)
  override def executeAsyncScript(script: String, args: AnyRef*): AnyRef = delegate.executeAsyncScript(script, args: _*)
}
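// Illustrative sketch (hypothetical, not from the upstream project): Firefox
// traffic is proxied through the local port, every intercepted HTML page gets an
// accessibility report, and the report wrapper is flushed once quit() pushes the
// stop message. The port and URL below are assumptions for illustration only.
object AccessibilityDriverExample {
  def main(args: Array[String]): Unit = {
    val driver = new AccessibilityDriver(port = 8080)
    try {
      driver.get("http://localhost:9000/")
      println(driver.getTitle)
    } finally {
      driver.quit() // signals end-of-stream so the report wrapper is written
    }
  }
}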
| kristapsmelderis/accessibility-driver | src/main/scala/uk/gov/hmrc/accessibility/AccessibilityDriver.scala | Scala | apache-2.0 | 3,182 |
package japgolly.scalajs.react.extra
import monocle._
import scalaz.effect.IO
import japgolly.scalajs.react._, ScalazReact._
/**
* Reusable version of [[ExternalVar]].
*/
final class ReusableVar[A](val value: A, val set: A ~=> IO[Unit])(implicit val reusability: Reusability[A]) {
override def toString =
s"ReusableVar($value, $set)"
def mod(f: A => A): IO[Unit] =
set(f(value))
def setL[B](l: Lens[A, B]): B => IO[Unit] =
b => set(l.set(b)(value))
def modL[B](l: Lens[A, B])(f: B => B): IO[Unit] =
set(l.modify(f)(value))
  // Zoom is dangerously deceptive here as it appears to work, but it will often overwrite the non-zoomed subset of A's state.
  // Use the zoom methods on ComponentScopes directly for a reliable alternative.
//
// def zoomL[B: Reusability](l: Lens[A, B]): ReusableVar[B] =
// ReusableVar(l get value)(set.dimap(s => b => s(l.set(b)(value))))
//
// def extZoomL[B](l: Lens[A, B]): ExternalVar[B] =
// ExternalVar(l get value)(b => set(l.set(b)(value)))
def toExternalVar: ExternalVar[A] =
ExternalVar(value)(set)
}
object ReusableVar {
@inline def apply[A: Reusability](value: A)(set: A ~=> IO[Unit]): ReusableVar[A] =
new ReusableVar(value, set)
@inline def state[S: Reusability]($: CompStateFocus[S]): ReusableVar[S] =
new ReusableVar($.state, ReusableFn($).setStateIO)
implicit def reusability[A]: Reusability[ReusableVar[A]] =
Reusability.fn((a, b) => (a.set ~=~ b.set) && b.reusability.test(a.value, b.value))
}
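// Illustrative sketch (hypothetical, not from the upstream project), kept in
// comment form because it relies on an implicit Reusability[Int] instance and on
// ReusableFn, whose exact shapes vary between scalajs-react versions:
//
//   val setCount: Int ~=> IO[Unit] = ReusableFn((i: Int) => IO(println(s"count = $i")))
//   val count = ReusableVar(1)(setCount) // needs an implicit Reusability[Int]
//   count.mod(_ + 1)                     // IO action that invokes the setter with 2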
| beni55/scalajs-react | extra/src/main/scala/japgolly/scalajs/react/extra/ReusableVar.scala | Scala | apache-2.0 | 1,509 |
package models.tenant
import java.time.LocalDateTime
case class MedicalIncident(datetime: LocalDateTime, description: String)
| SBP07/backend | app/models/tenant/MedicalIncident.scala | Scala | gpl-2.0 | 128 |
package se.culvertsoft.mgen.jspack.generator
import scala.collection.JavaConversions.asScalaBuffer
import se.culvertsoft.mgen.api.model.GeneratedSourceFile
import se.culvertsoft.mgen.api.model.Project
import se.culvertsoft.mgen.api.plugins.Generator
import se.culvertsoft.mgen.compiler.util.SourceCodeBuffer
import se.culvertsoft.mgen.compiler.util.SourceCodeBuffer.SourceCodeBuffer2String
class JavascriptGenerator extends Generator {
override def generate(project: Project, generatorSettings: java.util.Map[String, String]): java.util.List[GeneratedSourceFile] = {
implicit val txtBuffer = SourceCodeBuffer.getThreadLocal()
txtBuffer.clear()
val modules = project.allModulesRecursively()
val filePath = MkFilePath(generatorSettings)
MkIntro(generatorSettings)
MkModuleClassRegistry(modules)
MkModuleHashRegistry(modules)
MkOutro(generatorSettings)
val out = new java.util.ArrayList[GeneratedSourceFile]
out.add(new GeneratedSourceFile(filePath, txtBuffer))
out
}
} | culvertsoft/mgen | mgen-javascriptgenerator/src/main/scala/se/culvertsoft/mgen/jspack/generator/JavascriptGenerator.scala | Scala | mit | 1,022 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.storage
import java.io.{BufferedOutputStream, FileOutputStream, File, OutputStream}
import java.nio.channels.FileChannel
import org.apache.spark.Logging
import org.apache.spark.serializer.{SerializerInstance, SerializationStream}
import org.apache.spark.executor.ShuffleWriteMetrics
import org.apache.spark.util.Utils
/**
* A class for writing JVM objects directly to a file on disk. This class allows data to be appended
* to an existing block and can guarantee atomicity in the case of faults as it allows the caller to
* revert partial writes.
*
* This class does not support concurrent writes. Also, once the writer has been opened it cannot be
* reopened again.
*/
private[spark] class DiskBlockObjectWriter(
val file: File,
serializerInstance: SerializerInstance,
bufferSize: Int,
compressStream: OutputStream => OutputStream,
syncWrites: Boolean,
// These write metrics concurrently shared with other active DiskBlockObjectWriters who
// are themselves performing writes. All updates must be relative.
writeMetrics: ShuffleWriteMetrics,
val blockId: BlockId = null)
extends OutputStream
with Logging {
/** The file channel, used for repositioning / truncating the file. */
private var channel: FileChannel = null
private var bs: OutputStream = null
private var fos: FileOutputStream = null
private var ts: TimeTrackingOutputStream = null
private var objOut: SerializationStream = null
private var initialized = false
private var hasBeenClosed = false
private var commitAndCloseHasBeenCalled = false
/**
* Cursors used to represent positions in the file.
*
* xxxxxxxx|--------|--- |
* ^ ^ ^
* | | finalPosition
* | reportedPosition
* initialPosition
*
* initialPosition: Offset in the file where we start writing. Immutable.
* reportedPosition: Position at the time of the last update to the write metrics.
* finalPosition: Offset where we stopped writing. Set on closeAndCommit() then never changed.
* -----: Current writes to the underlying file.
* xxxxx: Existing contents of the file.
*/
private val initialPosition = file.length()
private var finalPosition: Long = -1
private var reportedPosition = initialPosition
/**
* Keep track of number of records written and also use this to periodically
* output bytes written since the latter is expensive to do for each record.
*/
private var numRecordsWritten = 0
def open(): DiskBlockObjectWriter = {
if (hasBeenClosed) {
throw new IllegalStateException("Writer already closed. Cannot be reopened.")
}
fos = new FileOutputStream(file, true)
ts = new TimeTrackingOutputStream(writeMetrics, fos)
channel = fos.getChannel()
bs = compressStream(new BufferedOutputStream(ts, bufferSize))
objOut = serializerInstance.serializeStream(bs)
initialized = true
this
}
override def close() {
if (initialized) {
Utils.tryWithSafeFinally {
if (syncWrites) {
// Force outstanding writes to disk and track how long it takes
objOut.flush()
val start = System.nanoTime()
fos.getFD.sync()
writeMetrics.incShuffleWriteTime(System.nanoTime() - start)
}
} {
objOut.close()
}
channel = null
bs = null
fos = null
ts = null
objOut = null
initialized = false
hasBeenClosed = true
}
}
def isOpen: Boolean = objOut != null
/**
* Flush the partial writes and commit them as a single atomic block.
*/
def commitAndClose(): Unit = {
if (initialized) {
// NOTE: Because Kryo doesn't flush the underlying stream we explicitly flush both the
// serializer stream and the lower level stream.
objOut.flush()
bs.flush()
close()
finalPosition = file.length()
// In certain compression codecs, more bytes are written after close() is called
writeMetrics.incShuffleBytesWritten(finalPosition - reportedPosition)
} else {
finalPosition = file.length()
}
commitAndCloseHasBeenCalled = true
}
/**
* Reverts writes that haven't been flushed yet. Callers should invoke this function
* when there are runtime exceptions. This method will not throw, though it may be
* unsuccessful in truncating written data.
*
* @return the file that this DiskBlockObjectWriter wrote to.
*/
def revertPartialWritesAndClose(): File = {
// Discard current writes. We do this by flushing the outstanding writes and then
// truncating the file to its initial position.
try {
if (initialized) {
writeMetrics.decShuffleBytesWritten(reportedPosition - initialPosition)
writeMetrics.decShuffleRecordsWritten(numRecordsWritten)
objOut.flush()
bs.flush()
close()
}
val truncateStream = new FileOutputStream(file, true)
try {
truncateStream.getChannel.truncate(initialPosition)
file
} finally {
truncateStream.close()
}
} catch {
case e: Exception =>
logError("Uncaught exception while reverting partial writes to file " + file, e)
file
}
}
/**
* Writes a key-value pair.
*/
def write(key: Any, value: Any) {
if (!initialized) {
open()
}
objOut.writeKey(key)
objOut.writeValue(value)
recordWritten()
}
override def write(b: Int): Unit = throw new UnsupportedOperationException()
override def write(kvBytes: Array[Byte], offs: Int, len: Int): Unit = {
if (!initialized) {
open()
}
bs.write(kvBytes, offs, len)
}
/**
* Notify the writer that a record worth of bytes has been written with OutputStream#write.
*/
def recordWritten(): Unit = {
numRecordsWritten += 1
writeMetrics.incShuffleRecordsWritten(1)
if (numRecordsWritten % 32 == 0) {
updateBytesWritten()
}
}
/**
* Returns the file segment of committed data that this Writer has written.
* This is only valid after commitAndClose() has been called.
*/
def fileSegment(): FileSegment = {
if (!commitAndCloseHasBeenCalled) {
throw new IllegalStateException(
"fileSegment() is only valid after commitAndClose() has been called")
}
new FileSegment(file, initialPosition, finalPosition - initialPosition)
}
/**
* Report the number of bytes written in this writer's shuffle write metrics.
* Note that this is only valid before the underlying streams are closed.
*/
private def updateBytesWritten() {
val pos = channel.position()
writeMetrics.incShuffleBytesWritten(pos - reportedPosition)
reportedPosition = pos
}
// For testing
private[spark] override def flush() {
objOut.flush()
bs.flush()
}
}
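// Illustrative sketch (hypothetical, not from the upstream project): the typical
// lifecycle of the writer above, opening lazily on the first write, committing
// atomically, then reading back the written segment. JavaSerializer and the
// no-argument ShuffleWriteMetrics constructor are assumed to be available here;
// on failure, revertPartialWritesAndClose() would be used instead of
// commitAndClose().
private[spark] object DiskBlockObjectWriterExample {
  import org.apache.spark.SparkConf
  import org.apache.spark.serializer.JavaSerializer
  def main(args: Array[String]): Unit = {
    val ser = new JavaSerializer(new SparkConf()).newInstance()
    val writer = new DiskBlockObjectWriter(
      new File("example-block.data"), ser, 32 * 1024, out => out,
      syncWrites = false, writeMetrics = new ShuffleWriteMetrics())
    writer.write("someKey", "someValue")
    writer.commitAndClose()
    println(writer.fileSegment()) // FileSegment(file, offset, length)
  }
}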
| chenc10/Spark-PAF | core/src/main/scala/org/apache/spark/storage/DiskBlockObjectWriter.scala | Scala | apache-2.0 | 7,704 |
package org.bitcoins.core.gen
import org.bitcoins.core.crypto._
import org.bitcoins.core.currency.CurrencyUnit
import org.bitcoins.core.number.UInt32
import org.bitcoins.core.policy.Policy
import org.bitcoins.core.protocol.script._
import org.bitcoins.core.protocol.transaction._
import org.bitcoins.core.script.crypto.HashType
import org.bitcoins.core.util.BitcoinSLogger
import org.bitcoins.core.wallet.EscrowTimeoutHelper
import org.scalacheck.Gen
/**
* Created by chris on 11/28/16.
*/
sealed abstract class WitnessGenerators extends BitcoinSLogger {
/** Generates a random [[org.bitcoins.core.protocol.script.ScriptWitness]] */
def scriptWitness: Gen[ScriptWitness] = {
    //TODO: I need to come back and uncomment this code after fixing
    //#111 on the issue tracker. We should be able to support an arbitrary byte vector,
    //not only pre-defined script witness types
//0 include here to generate the EmptyScriptWitness
/* val stack: Gen[Seq[Seq[Byte]]] = Gen.choose(0,10).flatMap(n => Gen.listOfN(n, NumberGenerator.bytes))
stack.map { s: Seq[Seq[Byte]] =>
val spkBytes = if (s.nonEmpty) s.head else Nil
val cmpctSPK = CompactSizeUInt(UInt64(spkBytes.size))
val scriptSigBytes: Seq[Byte] = if (s.size > 1) s.tail.flatten else Nil
val cmpctScriptSig = CompactSizeUInt(UInt64(scriptSigBytes.size))
val scriptSig = if (scriptSigBytes.isEmpty) EmptyScriptSignature else NonStandardScriptSignature(cmpctScriptSig.bytes ++ scriptSigBytes)
val spk = if (spkBytes.isEmpty) EmptyScriptPubKey else NonStandardScriptPubKey(cmpctSPK.bytes ++ spkBytes)
P2WSHWitnessV0(spk,scriptSig)
}*/
Gen.oneOf(p2wpkhWitnessV0, p2wshWitnessV0)
}
/** Generates a [[TransactionWitness]] with the specified number of witnesses */
def transactionWitness(numWitnesses: Int): Gen[TransactionWitness] = for {
inputWitnesses <- Gen.listOfN(numWitnesses, Gen.option(scriptWitness))
} yield TransactionWitness.fromWitOpt(inputWitnesses)
def transactionWitness: Gen[TransactionWitness] = for {
num <- Gen.choose(1, 10)
wit <- transactionWitness(num)
} yield wit
/** Generates a validly signed [[TransactionWitness]] */
def signedP2WPKHTransactionWitness: Gen[(TransactionWitness, WitnessTxSigComponent, Seq[ECPrivateKey])] = for {
privKey <- CryptoGenerators.privateKey
amount <- CurrencyUnitGenerator.satoshis
hashType <- CryptoGenerators.hashType
witScriptPubKey = P2WPKHWitnessSPKV0(privKey.publicKey)
unsignedScriptWitness = P2WPKHWitnessV0(privKey.publicKey)
unsignedWTxSigComponent = createUnsignedRawWTxSigComponent(witScriptPubKey, amount,
unsignedScriptWitness, None)
createdSig = TransactionSignatureCreator.createSig(unsignedWTxSigComponent, privKey, hashType)
scriptWitness = P2WPKHWitnessV0(privKey.publicKey, createdSig)
(witness, signedWtxSigComponent) = createSignedWTxComponent(scriptWitness, unsignedWTxSigComponent)
} yield (witness, signedWtxSigComponent, Seq(privKey))
def signedP2WSHP2PKTransactionWitness: Gen[(TransactionWitness, WitnessTxSigComponentRaw, Seq[ECPrivateKey])] = for {
(scriptPubKey, privKeys) <- ScriptGenerators.p2pkScriptPubKey
amount <- CurrencyUnitGenerator.satoshis
hashType <- CryptoGenerators.hashType
witScriptPubKey = P2WSHWitnessSPKV0(scriptPubKey)
unsignedScriptWitness = P2WSHWitnessV0(scriptPubKey)
u = createUnsignedRawWTxSigComponent(witScriptPubKey, amount,
unsignedScriptWitness, None)
createdSig = TransactionSignatureCreator.createSig(u, privKeys, hashType)
signedScriptWitness = P2WSHWitnessV0(scriptPubKey, P2PKScriptSignature(createdSig))
oldTx = u.transaction
txWitness = TransactionWitness(oldTx.witness.witnesses.updated(u.inputIndex.toInt, signedScriptWitness))
wtx = WitnessTransaction(oldTx.version, oldTx.inputs, oldTx.outputs, oldTx.lockTime, txWitness)
signedWtxSigComponent = WitnessTxSigComponentRaw(wtx, u.inputIndex, witScriptPubKey, u.flags, u.amount)
} yield (txWitness, signedWtxSigComponent, Seq(privKeys))
def signedP2WSHP2PKHTransactionWitness: Gen[(TransactionWitness, WitnessTxSigComponentRaw, Seq[ECPrivateKey])] = for {
(scriptPubKey, privKey) <- ScriptGenerators.p2pkhScriptPubKey
amount <- CurrencyUnitGenerator.satoshis
hashType <- CryptoGenerators.hashType
witScriptPubKey = P2WSHWitnessSPKV0(scriptPubKey)
unsignedScriptWitness = P2WSHWitnessV0(scriptPubKey)
u = createUnsignedRawWTxSigComponent(witScriptPubKey, amount, unsignedScriptWitness, None)
createdSig = TransactionSignatureCreator.createSig(u, privKey, hashType)
signedScriptWitness = P2WSHWitnessV0(scriptPubKey, P2PKHScriptSignature(createdSig, privKey.publicKey))
oldTx = u.transaction
txWitness = TransactionWitness(oldTx.witness.witnesses.updated(u.inputIndex.toInt, signedScriptWitness))
wtx = WitnessTransaction(oldTx.version, oldTx.inputs, oldTx.outputs, oldTx.lockTime, txWitness)
signedWtxSigComponent = WitnessTxSigComponentRaw(wtx, u.inputIndex, witScriptPubKey, u.flags, u.amount)
} yield (txWitness, signedWtxSigComponent, Seq(privKey))
def signedP2WSHMultiSigTransactionWitness: Gen[(TransactionWitness, WitnessTxSigComponentRaw, Seq[ECPrivateKey])] = for {
(scriptPubKey, privKeys) <- ScriptGenerators.multiSigScriptPubKey
amount <- CurrencyUnitGenerator.satoshis
hashType <- CryptoGenerators.hashType
witScriptPubKey = P2WSHWitnessSPKV0(scriptPubKey)
unsignedScriptWitness = P2WSHWitnessV0(scriptPubKey)
u = createUnsignedRawWTxSigComponent(witScriptPubKey, amount,
unsignedScriptWitness, None)
signedScriptSig = multiSigScriptSigGenHelper(privKeys, scriptPubKey, u, hashType)
signedScriptWitness = P2WSHWitnessV0(scriptPubKey, signedScriptSig)
oldTx = u.transaction
txWitness = TransactionWitness(oldTx.witness.witnesses.updated(u.inputIndex.toInt, signedScriptWitness))
wtx = WitnessTransaction(oldTx.version, oldTx.inputs, oldTx.outputs, oldTx.lockTime, txWitness)
signedWtxSigComponent = WitnessTxSigComponentRaw(wtx, u.inputIndex, witScriptPubKey, u.flags, u.amount)
} yield (txWitness, signedWtxSigComponent, privKeys)
/**
* Generates a random signed [[TransactionWitness]] with the corresponding [[WitnessTxSigComponent]]
* and [[ECPrivateKey]]s
*/
def signedP2WSHTransactionWitness: Gen[(TransactionWitness, WitnessTxSigComponentRaw, Seq[ECPrivateKey])] = {
Gen.oneOf(signedP2WSHP2PKTransactionWitness, signedP2WSHP2PKHTransactionWitness,
signedP2WSHMultiSigTransactionWitness, signedP2WSHEscrowTimeoutWitness)
}
def signedP2WSHMultiSigEscrowTimeoutWitness: Gen[(TransactionWitness, WitnessTxSigComponentRaw, Seq[ECPrivateKey])] = for {
(scriptPubKey, privKeys) <- ScriptGenerators.escrowTimeoutScriptPubKey
amount <- CurrencyUnitGenerator.satoshis
hashType <- CryptoGenerators.hashType
witScriptPubKey = P2WSHWitnessSPKV0(scriptPubKey)
unsignedScriptWitness = P2WSHWitnessV0(scriptPubKey)
u = createUnsignedRawWTxSigComponent(witScriptPubKey, amount,
unsignedScriptWitness, None)
signedScriptSig = csvEscrowTimeoutGenHelper(privKeys, scriptPubKey, u, hashType)
witness = EscrowTimeoutHelper.buildEscrowTimeoutScriptWitness(signedScriptSig, scriptPubKey, u)
oldTx = u.transaction
wTx = WitnessTransaction(oldTx.version, oldTx.inputs, oldTx.outputs, oldTx.lockTime, witness)
signedWTxSigComponent = WitnessTxSigComponentRaw(wTx, u.inputIndex, witScriptPubKey, u.flags, u.amount)
} yield (witness, signedWTxSigComponent, privKeys)
def spendableP2WSHTimeoutEscrowTimeoutWitness: Gen[(TransactionWitness, WitnessTxSigComponentRaw, Seq[ECPrivateKey])] = for {
(p2pkh, privKey) <- ScriptGenerators.p2pkhScriptPubKey
(scriptNum, sequence) <- TransactionGenerators.spendableCSVValues
csv = CSVScriptPubKey(scriptNum, p2pkh)
(m, _) <- ScriptGenerators.smallMultiSigScriptPubKey
scriptPubKey = EscrowTimeoutScriptPubKey(m, csv)
amount <- CurrencyUnitGenerator.satoshis
hashType <- CryptoGenerators.hashType
witScriptPubKey = P2WSHWitnessSPKV0(scriptPubKey)
unsignedScriptWitness = P2WSHWitnessV0(scriptPubKey)
u = createUnsignedRawWTxSigComponent(
witScriptPubKey,
amount, unsignedScriptWitness, Some(sequence))
createdSig = TransactionSignatureCreator.createSig(u, privKey, hashType)
scriptSig = CSVScriptSignature(P2PKHScriptSignature(createdSig, privKey.publicKey))
signedScriptWitness = P2WSHWitnessV0(scriptPubKey, EscrowTimeoutScriptSignature.fromLockTime(scriptSig))
//ScriptWitness(scriptPubKey.asm.flatMap(_.bytes) +: Seq(ScriptNumber.zero.bytes, privKey.publicKey.bytes,
//createdSig.bytes))
oldTx = u.transaction
txWitness = TransactionWitness(oldTx.witness.witnesses.updated(u.inputIndex.toInt, signedScriptWitness))
wtx = WitnessTransaction(oldTx.version, oldTx.inputs, oldTx.outputs, oldTx.lockTime, txWitness)
signedWtxSigComponent = WitnessTxSigComponentRaw(wtx, u.inputIndex, witScriptPubKey, u.flags, u.amount)
} yield (txWitness, signedWtxSigComponent, Seq(privKey))
def signedP2WSHEscrowTimeoutWitness: Gen[(TransactionWitness, WitnessTxSigComponentRaw, Seq[ECPrivateKey])] = {
Gen.oneOf(signedP2WSHMultiSigEscrowTimeoutWitness, spendableP2WSHTimeoutEscrowTimeoutWitness)
}
/** Helps generate a signed [[MultiSignatureScriptSignature]] */
private def multiSigScriptSigGenHelper(
privateKeys: Seq[ECPrivateKey],
scriptPubKey: MultiSignatureScriptPubKey,
unsignedWtxSigComponent: WitnessTxSigComponent,
hashType: HashType): MultiSignatureScriptSignature = {
val requiredSigs = scriptPubKey.requiredSigs
val txSignatures = for {
i <- 0 until requiredSigs
} yield TransactionSignatureCreator.createSig(unsignedWtxSigComponent, privateKeys(i), hashType)
//add the signature to the scriptSig instead of having an empty scriptSig
val signedScriptSig = MultiSignatureScriptSignature(txSignatures)
signedScriptSig
}
def csvEscrowTimeoutGenHelper(privateKeys: Seq[ECPrivateKey], scriptPubKey: EscrowTimeoutScriptPubKey,
unsignedWtxSigComponent: WitnessTxSigComponent,
hashType: HashType): EscrowTimeoutScriptSignature = {
if (scriptPubKey.escrow.requiredSigs == 0) {
EscrowTimeoutScriptSignature.fromMultiSig(MultiSignatureScriptSignature(Nil))
} else if (privateKeys.size == 1) {
val signature = csvEscrowTimeoutGenSignature(privateKeys.head, scriptPubKey, unsignedWtxSigComponent, hashType)
EscrowTimeoutScriptSignature.fromMultiSig(MultiSignatureScriptSignature(Seq(signature)))
} else {
val multiSig = multiSigScriptSigGenHelper(privateKeys, scriptPubKey.escrow, unsignedWtxSigComponent, hashType)
EscrowTimeoutScriptSignature.fromMultiSig(multiSig)
}
}
def csvEscrowTimeoutGenSignature(privKey: ECPrivateKey, scriptPubKey: EscrowTimeoutScriptPubKey,
unsignedWtxSigComponent: WitnessTxSigComponent, hashType: HashType): ECDigitalSignature = {
val signature = TransactionSignatureCreator.createSig(unsignedWtxSigComponent, privKey, hashType)
signature
}
/** Generates a random [[org.bitcoins.core.protocol.script.P2WPKHWitnessV0]] */
def p2wpkhWitnessV0: Gen[P2WPKHWitnessV0] = for {
publicKey <- CryptoGenerators.publicKey
sig <- CryptoGenerators.digitalSignature
} yield P2WPKHWitnessV0(publicKey, sig)
/** Generates a random [[org.bitcoins.core.protocol.script.P2WSHWitnessV0]] */
def p2wshWitnessV0: Gen[P2WSHWitnessV0] = for {
(redeem, _) <- ScriptGenerators.scriptPubKey
scriptSig <- ScriptGenerators.scriptSignature
} yield P2WSHWitnessV0(redeem, scriptSig)
/** Takes a signed [[ScriptWitness]] and an unsignedTx and adds the witness to the unsigned [[WitnessTransaction]] */
def createSignedWTxComponent(witness: ScriptWitness, unsignedWTxComponent: WitnessTxSigComponent): (TransactionWitness, WitnessTxSigComponent) = {
val signedTxWitness = TransactionWitness.fromWitOpt(Seq(Some(witness)))
val unsignedSpendingTx = unsignedWTxComponent.transaction
val signedSpendingTx = WitnessTransaction(unsignedSpendingTx.version, unsignedSpendingTx.inputs, unsignedSpendingTx.outputs,
unsignedSpendingTx.lockTime, signedTxWitness)
val signedWtxSigComponent = unsignedWTxComponent match {
case wtxP2SH: WitnessTxSigComponentP2SH =>
WitnessTxSigComponent(signedSpendingTx, unsignedWTxComponent.inputIndex,
wtxP2SH.scriptPubKey, unsignedWTxComponent.flags, unsignedWTxComponent.amount)
case wtxRaw: WitnessTxSigComponentRaw =>
WitnessTxSigComponent(signedSpendingTx, unsignedWTxComponent.inputIndex,
wtxRaw.scriptPubKey, unsignedWTxComponent.flags, unsignedWTxComponent.amount)
}
(signedTxWitness, signedWtxSigComponent)
}
/** Creates a unsigned [[WitnessTxSigComponent]] from the given parameters */
def createUnsignedRawWTxSigComponent(witScriptPubKey: WitnessScriptPubKey, amount: CurrencyUnit,
unsignedScriptWitness: ScriptWitness, sequence: Option[UInt32]): WitnessTxSigComponentRaw = {
val tc = TransactionConstants
val flags = Policy.standardScriptVerifyFlags
val witness = TransactionWitness.fromWitOpt(Seq(Some(unsignedScriptWitness)))
val (creditingTx, outputIndex) = TransactionGenerators.buildCreditingTransaction(witScriptPubKey, amount)
val (unsignedSpendingTx, inputIndex) = TransactionGenerators.buildSpendingTransaction(tc.validLockVersion, creditingTx,
EmptyScriptSignature, outputIndex, tc.lockTime,
sequence.getOrElse(tc.sequence), witness)
val unsignedWtxSigComponent = WitnessTxSigComponentRaw(unsignedSpendingTx, inputIndex, witScriptPubKey, flags, amount)
unsignedWtxSigComponent
}
}
object WitnessGenerators extends WitnessGenerators
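// Illustrative sketch (hypothetical, not from the upstream project): drawing a
// single value from one generator outside a property test. sample may return
// None when generation fails, so the result is handled with foreach; in real
// suites these generators are normally fed to ScalaCheck's forAll.
object WitnessGeneratorsExample {
  def main(args: Array[String]): Unit = {
    WitnessGenerators.signedP2WPKHTransactionWitness.sample.foreach {
      case (witness, wtxSigComponent, privKeys) =>
        println(s"witnesses: ${witness.witnesses.size}, amount: ${wtxSigComponent.amount}, keys: ${privKeys.size}")
    }
  }
}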
| Christewart/bitcoin-s-core | src/main/scala/org/bitcoins/core/gen/WitnessGenerators.scala | Scala | mit | 13,819 |
/*
* Copyright 2014 Lars Edenbrandt
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package se.nimsa.sbx.metadata
import se.nimsa.sbx.app.GeneralProtocol._
import se.nimsa.sbx.dicom.DicomHierarchy._
import se.nimsa.sbx.dicom.DicomPropertyValue._
import se.nimsa.sbx.metadata.MetaDataProtocol._
import se.nimsa.sbx.seriestype.SeriesTypeDAO
import se.nimsa.sbx.util.DbUtil._
import slick.basic.{BasicAction, BasicStreamingAction, DatabaseConfig}
import slick.jdbc.{GetResult, JdbcProfile}
import scala.concurrent.{ExecutionContext, Future}
class PropertiesDAO(val dbConf: DatabaseConfig[JdbcProfile])(implicit ec: ExecutionContext) {
import MetaDataDAO._
import dbConf.profile.api._
val db = dbConf.db
val metaDataDao = new MetaDataDAO(dbConf)
val seriesTypeDao = new SeriesTypeDAO(dbConf)
import metaDataDao._
// *** Sources ***
private val toSeriesSource = (id: Long, sourceType: String, sourceName: String, sourceId: Long) => SeriesSource(id, Source(SourceType.withName(sourceType), sourceName, sourceId))
private val fromSeriesSource = (seriesSource: SeriesSource) => Option((seriesSource.id, seriesSource.source.sourceType.toString(), seriesSource.source.sourceName, seriesSource.source.sourceId))
private class SeriesSources(tag: Tag) extends Table[SeriesSource](tag, SeriesSources.name) {
def id = column[Long]("id", O.PrimaryKey)
def sourceType = column[String]("sourcetype")
def sourceName = column[String]("sourcename")
def sourceId = column[Long]("sourceid")
def seriesSourceToImageFKey = foreignKey("seriesSourceToImageFKey", id, metaDataDao.seriesQuery)(_.id, onUpdate = ForeignKeyAction.Cascade, onDelete = ForeignKeyAction.Cascade)
def * = (id, sourceType, sourceName, sourceId) <> (toSeriesSource.tupled, fromSeriesSource)
}
object SeriesSources {
val name = "SeriesSources"
}
private val seriesSourceQuery = TableQuery[SeriesSources]
// *** Tags ***
private val toSeriesTag = (id: Long, name: String) => SeriesTag(id, name)
private val fromSeriesTag = (seriesTag: SeriesTag) => Option((seriesTag.id, seriesTag.name))
class SeriesTagTable(tag: Tag) extends Table[SeriesTag](tag, SeriesTagTable.name) {
def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
def name = column[String]("name", O.Length(180))
def idxUniqueName = index("idx_unique_series_tag_name", name, unique = true)
def * = (id, name) <> (toSeriesTag.tupled, fromSeriesTag)
}
object SeriesTagTable {
val name = "SeriesTags"
}
private val seriesTagQuery = TableQuery[SeriesTagTable]
private val toSeriesSeriesTagRule = (seriesId: Long, seriesTagId: Long) => SeriesSeriesTag(seriesId, seriesTagId)
private val fromSeriesSeriesTagRule = (seriesSeriesTag: SeriesSeriesTag) => Option((seriesSeriesTag.seriesId, seriesSeriesTag.seriesTagId))
private class SeriesSeriesTagTable(tag: Tag) extends Table[SeriesSeriesTag](tag, SeriesSeriesTagTable.name) {
def seriesId = column[Long]("seriesid")
def seriesTagId = column[Long]("seriestagid")
def pk = primaryKey("pk_tag", (seriesId, seriesTagId))
def fkSeries = foreignKey("fk_series_seriesseriestag", seriesId, metaDataDao.seriesQuery)(_.id, onDelete = ForeignKeyAction.Cascade)
def fkSeriesType = foreignKey("fk_seriestag_seriesseriestag", seriesTagId, seriesTagQuery)(_.id, onDelete = ForeignKeyAction.Cascade)
def * = (seriesId, seriesTagId) <> (toSeriesSeriesTagRule.tupled, fromSeriesSeriesTagRule)
}
object SeriesSeriesTagTable {
val name = "SeriesSeriesTags"
}
private val seriesSeriesTagQuery = TableQuery[SeriesSeriesTagTable]
// Setup
def create(): Future[Unit] = createTables(dbConf, (SeriesSources.name, seriesSourceQuery), (SeriesTagTable.name, seriesTagQuery), (SeriesSeriesTagTable.name, seriesSeriesTagQuery))
def drop(): Future[Unit] = db.run {
(seriesSourceQuery.schema ++ seriesTagQuery.schema ++ seriesSeriesTagQuery.schema).drop
}
def clear(): Future[Unit] = db.run {
DBIO.seq(seriesSourceQuery.delete, seriesTagQuery.delete, seriesSeriesTagQuery.delete)
}
// Functions
def insertSeriesSourceAction(seriesSource: SeriesSource): DBIOAction[SeriesSource, NoStream, Effect.Write] = (seriesSourceQuery += seriesSource).map(_ => seriesSource)
def insertSeriesSource(seriesSource: SeriesSource): Future[SeriesSource] = db.run(insertSeriesSourceAction(seriesSource))
def updateSeriesSourceAction(seriesSource: SeriesSource): BasicAction[Int, NoStream, Effect.Write] =
seriesSourceQuery
.filter(_.id === seriesSource.id)
.update(seriesSource)
def updateSeriesSource(seriesSource: SeriesSource): Future[Int] = db.run(updateSeriesSourceAction(seriesSource))
def seriesSourcesByIdAction(seriesId: Long): BasicStreamingAction[Seq[SeriesSource], SeriesSource, Effect.Read] =
seriesSourceQuery.filter(_.id === seriesId).result
def seriesSourceById(seriesId: Long): Future[Option[SeriesSource]] = db.run(seriesSourcesByIdAction(seriesId).headOption)
def seriesSources: Future[Seq[SeriesSource]] = db.run {
seriesSourceQuery.result
}
def seriesTags: Future[Seq[SeriesTag]] = db.run {
seriesTagQuery.result
}
def insertSeriesTagAction(seriesTag: SeriesTag): DBIOAction[SeriesTag, NoStream, Effect.Write] =
(seriesTagQuery returning seriesTagQuery.map(_.id) += seriesTag)
.map(generatedId => seriesTag.copy(id = generatedId))
def insertSeriesTag(seriesTag: SeriesTag): Future[SeriesTag] = db.run(insertSeriesTagAction(seriesTag))
def seriesTagsForNameAction(name: String): BasicStreamingAction[Seq[SeriesTag], SeriesTag, Effect.Read] =
seriesTagQuery.filter(_.name === name).result
def seriesTagForName(name: String): Future[Option[SeriesTag]] = db.run(seriesTagsForNameAction(name).headOption)
def seriesTagForId(id: Long): Future[Option[SeriesTag]] = db.run(seriesTagQuery.filter(_.id === id).result.headOption)
def updateSeriesTag(seriesTag: SeriesTag): Future[Option[SeriesTag]] = db.run {
seriesTagQuery.filter(_.id === seriesTag.id).update(seriesTag)
}.map { updateCount =>
if (updateCount > 0) {
Some(seriesTag)
} else {
None
}
}
def listSeriesSources: Future[Seq[SeriesSource]] = db.run {
seriesSourceQuery.result
}
def listSeriesTags(startIndex: Long, count: Long, orderBy: Option[String], orderAscending: Boolean, filter: Option[String]): Future[Seq[SeriesTag]] = db.run {
val filtered = filter.map(f => seriesTagQuery.filter(_.name like s"%$f%")).getOrElse(seriesTagQuery)
val sorted = orderBy match {
case Some("id") => if (orderAscending) filtered.sortBy(_.id.asc) else filtered.sortBy(_.id.desc)
case Some("name") => if (orderAscending) filtered.sortBy(_.name.asc) else filtered.sortBy(_.name.desc)
case _ => filtered
}
sorted.drop(startIndex).take(count).result
}
def deleteSeriesTag(tagId: Long): Future[Unit] = db.run(seriesTagQuery.filter(_.id === tagId).delete.map(_ => {}))
def insertSeriesSeriesTagAction(seriesSeriesTag: SeriesSeriesTag): DBIOAction[SeriesSeriesTag, NoStream, Effect.Write] =
(seriesSeriesTagQuery += seriesSeriesTag).map(_ => seriesSeriesTag)
def insertSeriesSeriesTag(seriesSeriesTag: SeriesSeriesTag): Future[SeriesSeriesTag] =
db.run(insertSeriesSeriesTagAction(seriesSeriesTag))
def listSeriesSeriesTagsForSeriesId(seriesId: Long): Future[Seq[SeriesSeriesTag]] = db.run {
seriesSeriesTagQuery.filter(_.seriesId === seriesId).result
}
private def listSeriesSeriesTagsForSeriesTagIdAction(seriesTagId: Long) =
seriesSeriesTagQuery.filter(_.seriesTagId === seriesTagId).result
def listSeriesSeriesTagsForSeriesTagId(seriesTagId: Long): Future[Seq[SeriesSeriesTag]] =
db.run(listSeriesSeriesTagsForSeriesTagIdAction(seriesTagId))
def seriesSeriesTagsForSeriesTagIdAndSeriesIdAction(seriesTagId: Long, seriesId: Long): BasicStreamingAction[Seq[SeriesSeriesTag], SeriesSeriesTag, Effect.Read] =
seriesSeriesTagQuery.filter(_.seriesTagId === seriesTagId).filter(_.seriesId === seriesId).result
def seriesSeriesTagForSeriesTagIdAndSeriesId(seriesTagId: Long, seriesId: Long): Future[Option[SeriesSeriesTag]] =
db.run(seriesSeriesTagsForSeriesTagIdAndSeriesIdAction(seriesTagId, seriesId).headOption)
def removeSeriesSeriesTagAction(seriesTagId: Long, seriesId: Long): DBIOAction[Unit, NoStream, Effect.Write] =
seriesSeriesTagQuery.filter(_.seriesTagId === seriesTagId).filter(_.seriesId === seriesId).delete.map(_ => {})
def removeSeriesSeriesTag(seriesTagId: Long, seriesId: Long): Future[Unit] = db.run(removeSeriesSeriesTagAction(seriesTagId, seriesId))
def seriesTagsForSeriesAction(seriesId: Long): BasicStreamingAction[Seq[SeriesTag], SeriesTag, Effect.Read] = {
val innerJoin = for {
sst <- seriesSeriesTagQuery.filter(_.seriesId === seriesId)
stq <- seriesTagQuery if sst.seriesTagId === stq.id
} yield stq
innerJoin.result
}
def seriesTagsForSeries(seriesId: Long): Future[Seq[SeriesTag]] = db.run(seriesTagsForSeriesAction(seriesId))
def addAndInsertSeriesTagForSeriesIdAction(seriesTag: SeriesTag, seriesId: Long): DBIOAction[SeriesTag, NoStream, Effect.Read with Effect.Write with Effect.Read with Effect.Write] =
seriesTagsForNameAction(seriesTag.name)
.headOption
.flatMap(_
.map(DBIO.successful)
.getOrElse(insertSeriesTagAction(seriesTag)))
.flatMap { dbSeriesTag =>
seriesSeriesTagsForSeriesTagIdAndSeriesIdAction(dbSeriesTag.id, seriesId)
.headOption
.flatMap(_
.map(_ => DBIO.successful(dbSeriesTag))
.getOrElse(insertSeriesSeriesTagAction(SeriesSeriesTag(seriesId, dbSeriesTag.id)).map(_ => dbSeriesTag)))
}
def addAndInsertSeriesTagForSeriesId(seriesTag: SeriesTag, seriesId: Long): Future[SeriesTag] =
db.run(addAndInsertSeriesTagForSeriesIdAction(seriesTag, seriesId).transactionally)
def removeSeriesTagForSeriesId(seriesTagId: Long, seriesId: Long): Future[Unit] = db.run {
removeSeriesSeriesTagAction(seriesTagId, seriesId)
}
/**
* Delete input images. If any series, studies and/or patients become empty as a result of this, delete them too.
* Also, delete series tags no longer used, if any.
*
* @param imageIds IDs of images to delete
* @return the ids of deleted patients, studies, series and images
*/
def deleteFully(imageIds: Seq[Long]): Future[(Seq[Long], Seq[Long], Seq[Long], Seq[Long])] = {
val action = DBIO.sequence {
imageIds
.grouped(1000) // micro-batch to keep size of queries under control
.map(subset => deleteFullyBatch(subset))
}
db.run(action.transactionally)
.map {
// put the subsets back together again
_.foldLeft((Seq.empty[Long], Seq.empty[Long], Seq.empty[Long], Seq.empty[Long])) { (total, ids) =>
(total._1 ++ ids._1, total._2 ++ ids._2, total._3 ++ ids._3, total._4 ++ ids._4)
}
}
}
private def deleteFullyBatch(imageIds: Seq[Long]) = {
val images = imagesQuery.filter(_.id inSetBind imageIds) // batch this?
images.map(_.id).result.flatMap { imageIds =>
val uniqueSeriesIdsAction = images.map(_.seriesId).distinct.result
val deleteImagesAction = images.delete
// find empty series for images, then delete images
uniqueSeriesIdsAction.flatMap { uniqueSeriesIds =>
deleteImagesAction.flatMap { _ =>
DBIO.sequence(uniqueSeriesIds.map(seriesId =>
imagesQuery.filter(_.seriesId === seriesId).take(1).result.map {
case ims if ims.nonEmpty => None
case _ => Some(seriesId)
}
)).map(_.flatten)
}
}.flatMap { emptySeriesIds =>
// find empty studies for series, then delete empty series
val series = seriesQuery.filter(_.id inSetBind emptySeriesIds)
val uniqueStudyIdsAction = series.map(_.studyId).distinct.result
val deleteSeriesAction = series.delete
uniqueStudyIdsAction.flatMap { uniqueStudyIds =>
deleteSeriesAction.flatMap { _ =>
DBIO.sequence(uniqueStudyIds.map(studyId =>
seriesQuery.filter(_.studyId === studyId).take(1).result.map {
case ims if ims.nonEmpty => None
case _ => Some(studyId)
}
)).map(_.flatten)
}
}.flatMap { emptyStudyIds =>
// find empty patients for studies, then delete empty studies
val studies = studiesQuery.filter(_.id inSetBind emptyStudyIds)
val uniquePatientIdsAction = studies.map(_.patientId).distinct.result
val deleteStudiesAction = studies.delete
uniquePatientIdsAction.flatMap { uniquePatientIds =>
deleteStudiesAction.flatMap { _ =>
DBIO.sequence(uniquePatientIds.map(patientId =>
studiesQuery.filter(_.patientId === patientId).take(1).result.map {
case ims if ims.nonEmpty => None
case _ => Some(patientId)
}
)).map(_.flatten)
}
}.flatMap { emptyPatientIds =>
// delete empty patients
patientsQuery.filter(_.id inSetBind emptyPatientIds).delete
// return deleted ids for each level
.map(_ => (emptyPatientIds, emptyStudyIds, emptySeriesIds, imageIds))
}
}
}
}
}
def flatSeries(startIndex: Long, count: Long, orderBy: Option[String], orderAscending: Boolean, filter: Option[String], sourceRefs: Seq[SourceRef], seriesTypeIds: Seq[Long], seriesTagIds: Seq[Long]): Future[Seq[FlatSeries]] =
if (isWithAdvancedFiltering(sourceRefs, seriesTypeIds, seriesTagIds))
checkColumnExists(dbConf, orderBy, PatientsTable.name, StudiesTable.name, SeriesTable.name).flatMap { _ =>
db.run {
implicit val getResult: GetResult[FlatSeries] = metaDataDao.flatSeriesGetResult
val query =
metaDataDao.flatSeriesBasePart +
propertiesJoinPart(sourceRefs, seriesTypeIds, seriesTagIds) +
" where" +
metaDataDao.flatSeriesFilterPart(filter) +
andPart(filter, sourceRefs) +
sourcesPart(sourceRefs) +
andPart(filter, sourceRefs, seriesTypeIds) +
seriesTypesPart(seriesTypeIds) +
andPart(filter, sourceRefs, seriesTypeIds, seriesTagIds) +
seriesTagsPart(seriesTagIds) +
orderByPart(flatSeriesOrderBy(orderBy), orderAscending) +
pagePart(startIndex, count)
sql"#$query".as[FlatSeries]
}
}
else
metaDataDao.flatSeries(startIndex, count, orderBy, orderAscending, filter)
def propertiesJoinPart(sourceRefs: Seq[SourceRef], seriesTypeIds: Seq[Long], seriesTagIds: Seq[Long]): String =
singlePropertyJoinPart(sourceRefs, """ inner join "SeriesSources" on "Series"."id" = "SeriesSources"."id"""") +
singlePropertyJoinPart(seriesTypeIds, """ inner join "SeriesSeriesTypes" on "Series"."id" = "SeriesSeriesTypes"."seriesid"""") +
singlePropertyJoinPart(seriesTagIds, """ inner join "SeriesSeriesTags" on "Series"."id" = "SeriesSeriesTags"."seriesid"""")
def singlePropertyJoinPart(property: Seq[_ <: Any], part: String): String = if (property.isEmpty) "" else part
def patients(startIndex: Long, count: Long, orderBy: Option[String], orderAscending: Boolean, filter: Option[String], sourceRefs: Seq[SourceRef], seriesTypeIds: Seq[Long], seriesTagIds: Seq[Long]): Future[Seq[Patient]] =
if (isWithAdvancedFiltering(sourceRefs, seriesTypeIds, seriesTagIds))
checkColumnExists(dbConf, orderBy, PatientsTable.name).flatMap { _ =>
db.run {
implicit val getResult: GetResult[Patient] = patientsGetResult
val query =
patientsBasePart +
propertiesJoinPart(sourceRefs, seriesTypeIds, seriesTagIds) +
" where" +
patientsFilterPart(filter) +
andPart(filter, sourceRefs) +
sourcesPart(sourceRefs) +
andPart(filter, sourceRefs, seriesTypeIds) +
seriesTypesPart(seriesTypeIds) +
andPart(filter, sourceRefs, seriesTypeIds, seriesTagIds) +
seriesTagsPart(seriesTagIds) +
orderByPart(orderBy.map(o => s""""Patients"."$o""""), orderAscending) +
pagePart(startIndex, count)
sql"#$query".as[Patient]
}
}
else
metaDataDao.patients(startIndex, count, orderBy, orderAscending, filter)
def parseQueryOrder(optionalOrder: Option[QueryOrder]): (Option[String], Boolean) =
(optionalOrder.map(_.orderBy), optionalOrder.forall(_.orderAscending))
def wherePart(arrays: Seq[_ <: Any]*): String =
if (arrays.exists(_.nonEmpty))
" where "
else
""
def queryMainPart(startIndex: Long, count: Long, orderBy: Option[String], orderAscending: Boolean, sourceRefs: Seq[SourceRef], seriesTypeIds: Seq[Long], seriesTagIds: Seq[Long], queryProperties: Seq[QueryProperty]): String =
propertiesJoinPart(sourceRefs, seriesTypeIds, seriesTagIds) +
wherePart(queryProperties, sourceRefs, seriesTypeIds, seriesTagIds) +
queryPart(queryProperties) +
andPart(queryProperties, sourceRefs) +
sourcesPart(sourceRefs) +
andPart(queryProperties, sourceRefs, seriesTypeIds) +
seriesTypesPart(seriesTypeIds) +
andPart(queryProperties, sourceRefs, seriesTypeIds, seriesTagIds) +
seriesTagsPart(seriesTagIds) +
orderByPart(orderBy, orderAscending) +
pagePart(startIndex, count)
def queryPatients(startIndex: Long, count: Long, optionalOrder: Option[QueryOrder], queryProperties: Seq[QueryProperty], optionalFilters: Option[QueryFilters]): Future[Seq[Patient]] = {
val (orderBy, orderAscending) = parseQueryOrder(optionalOrder)
optionalFilters.filter { filters =>
isWithAdvancedFiltering(filters.seriesTagIds, filters.seriesTypeIds, filters.sourceRefs)
}.map { filters =>
checkColumnExists(dbConf, orderBy, PatientsTable.name, StudiesTable.name, SeriesTable.name).flatMap { _ =>
Future.sequence(queryProperties.map(qp => checkColumnExists(dbConf, qp.propertyName, PatientsTable.name, StudiesTable.name, SeriesTable.name))).flatMap { _ =>
db.run {
implicit val getResult: GetResult[Patient] = metaDataDao.patientsGetResult
val query =
metaDataDao.queryPatientsSelectPart +
queryMainPart(startIndex, count, orderBy.map(o => s""""Patients"."$o""""), orderAscending, filters.sourceRefs, filters.seriesTypeIds, filters.seriesTagIds, queryProperties)
sql"#$query".as[Patient]
}
}
}
}.getOrElse {
metaDataDao.queryPatients(startIndex, count, orderBy, orderAscending, queryProperties)
}
}
def queryStudies(startIndex: Long, count: Long, optionalOrder: Option[QueryOrder], queryProperties: Seq[QueryProperty], optionalFilters: Option[QueryFilters]): Future[Seq[Study]] = {
val (orderBy, orderAscending) = parseQueryOrder(optionalOrder)
optionalFilters.filter { filters =>
isWithAdvancedFiltering(filters.seriesTagIds, filters.seriesTypeIds, filters.sourceRefs)
}.map { filters =>
checkColumnExists(dbConf, orderBy, PatientsTable.name, StudiesTable.name, SeriesTable.name).flatMap { _ =>
Future.sequence(queryProperties.map(qp => checkColumnExists(dbConf, qp.propertyName, PatientsTable.name, StudiesTable.name, SeriesTable.name))).flatMap { _ =>
db.run {
implicit val getResult: GetResult[Study] = metaDataDao.studiesGetResult
val query =
metaDataDao.queryStudiesSelectPart +
queryMainPart(startIndex, count, orderBy.map(o => s""""Studies"."$o""""), orderAscending, filters.sourceRefs, filters.seriesTypeIds, filters.seriesTagIds, queryProperties)
sql"#$query".as[Study]
}
}
}
}.getOrElse {
metaDataDao.queryStudies(startIndex, count, orderBy, orderAscending, queryProperties)
}
}
def querySeries(startIndex: Long, count: Long, optionalOrder: Option[QueryOrder], queryProperties: Seq[QueryProperty], optionalFilters: Option[QueryFilters]): Future[Seq[Series]] = {
val (orderBy, orderAscending) = parseQueryOrder(optionalOrder)
optionalFilters.filter { filters =>
isWithAdvancedFiltering(filters.seriesTagIds, filters.seriesTypeIds, filters.sourceRefs)
}.map { filters =>
checkColumnExists(dbConf, orderBy, PatientsTable.name, StudiesTable.name, SeriesTable.name).flatMap { _ =>
Future.sequence(queryProperties.map(qp => checkColumnExists(dbConf, qp.propertyName, PatientsTable.name, StudiesTable.name, SeriesTable.name))).flatMap { _ =>
db.run {
implicit val getResult: GetResult[Series] = metaDataDao.seriesGetResult
val query =
metaDataDao.querySeriesSelectPart +
queryMainPart(startIndex, count, orderBy.map(o => s""""Series"."$o""""), orderAscending, filters.sourceRefs, filters.seriesTypeIds, filters.seriesTagIds, queryProperties)
sql"#$query".as[Series]
}
}
}
}.getOrElse {
metaDataDao.querySeries(startIndex, count, orderBy, orderAscending, queryProperties)
}
}
def queryImages(startIndex: Long, count: Long, optionalOrder: Option[QueryOrder], queryProperties: Seq[QueryProperty], optionalFilters: Option[QueryFilters]): Future[Seq[Image]] = {
val (orderBy, orderAscending) = parseQueryOrder(optionalOrder)
optionalFilters.filter { filters =>
isWithAdvancedFiltering(filters.seriesTagIds, filters.seriesTypeIds, filters.sourceRefs)
}.map { filters =>
checkColumnExists(dbConf, orderBy, PatientsTable.name, StudiesTable.name, SeriesTable.name, ImagesTable.name).flatMap { _ =>
Future.sequence(queryProperties.map(qp => checkColumnExists(dbConf, qp.propertyName, PatientsTable.name, StudiesTable.name, SeriesTable.name, ImagesTable.name))).flatMap { _ =>
db.run {
implicit val getResult: GetResult[Image] = metaDataDao.imagesGetResult
val query =
metaDataDao.queryImagesSelectPart +
queryMainPart(startIndex, count, orderBy.map(o => s""""Images"."$o""""), orderAscending, filters.sourceRefs, filters.seriesTypeIds, filters.seriesTagIds, queryProperties)
sql"#$query".as[Image]
}
}
}
}.getOrElse {
metaDataDao.queryImages(startIndex, count, orderBy, orderAscending, queryProperties)
}
}
def queryFlatSeries(startIndex: Long, count: Long, optionalOrder: Option[QueryOrder], queryProperties: Seq[QueryProperty], optionalFilters: Option[QueryFilters]): Future[Seq[FlatSeries]] = {
val (orderBy, orderAscending) = parseQueryOrder(optionalOrder)
optionalFilters.filter { filters =>
isWithAdvancedFiltering(filters.seriesTagIds, filters.seriesTypeIds, filters.sourceRefs)
}.map { filters =>
checkColumnExists(dbConf, orderBy, PatientsTable.name, StudiesTable.name, SeriesTable.name).flatMap { _ =>
Future.sequence(queryProperties.map(qp => checkColumnExists(dbConf, qp.propertyName, PatientsTable.name, StudiesTable.name, SeriesTable.name))).flatMap { _ =>
db.run {
implicit val getResult: GetResult[FlatSeries] = metaDataDao.flatSeriesGetResult
val query =
metaDataDao.flatSeriesBasePart +
queryMainPart(startIndex, count, flatSeriesOrderBy(orderBy), orderAscending, filters.sourceRefs, filters.seriesTypeIds, filters.seriesTagIds, queryProperties)
sql"#$query".as[FlatSeries]
}
}
}
}.getOrElse {
metaDataDao.queryFlatSeries(startIndex, count, orderBy, orderAscending, queryProperties)
}
}
def isWithAdvancedFiltering(arrays: Seq[_ <: Any]*): Boolean = arrays.exists(_.nonEmpty)
def patientsBasePart =
s"""select distinct("Patients"."id"),
"Patients"."patientName","Patients"."patientID","Patients"."patientBirthDate","Patients"."patientSex"
from "Series"
inner join "Studies" on "Series"."studyId" = "Studies"."id"
inner join "Patients" on "Studies"."patientId" = "Patients"."id""""
def andPart(target: Seq[_ <: Any]): String = if (target.nonEmpty) " and" else ""
def andPart(array: Seq[_ <: Any], target: Seq[_ <: Any]): String = if (array.nonEmpty && target.nonEmpty) " and" else ""
def andPart(array1: Seq[_ <: Any], array2: Seq[_ <: Any], target: Seq[_ <: Any]): String = if ((array1.nonEmpty || array2.nonEmpty) && target.nonEmpty) " and" else ""
def andPart(array1: Seq[_ <: Any], array2: Seq[_ <: Any], array3: Seq[_ <: Any], target: Seq[_ <: Any]): String = if ((array1.nonEmpty || array2.nonEmpty || array3.nonEmpty) && target.nonEmpty) " and" else ""
def andPart(option: Option[Any], target: Seq[_ <: Any]): String = if (option.isDefined && target.nonEmpty) " and" else ""
def andPart(option: Option[Any], array: Seq[_ <: Any], target: Seq[_ <: Any]): String = if ((option.isDefined || array.nonEmpty) && target.nonEmpty) " and" else ""
def andPart(option: Option[Any], array1: Seq[_ <: Any], array2: Seq[_ <: Any], target: Seq[_ <: Any]): String = if ((option.isDefined || array1.nonEmpty || array2.nonEmpty) && target.nonEmpty) " and" else ""
def sourcesPart(sourceRefs: Seq[SourceRef]): String =
if (sourceRefs.isEmpty)
""
else
" (" + sourceRefs.map(sourceTypeId =>
s""""SeriesSources"."sourcetype" = '${sourceTypeId.sourceType}' and "SeriesSources"."sourceid" = ${sourceTypeId.sourceId}""")
.mkString(" or ") + ")"
def seriesTypesPart(seriesTypeIds: Seq[Long]): String =
if (seriesTypeIds.isEmpty)
""
else
" (" + seriesTypeIds.map(seriesTypeId =>
s""""SeriesSeriesTypes"."seriestypeid" = $seriesTypeId""")
.mkString(" or ") + ")"
def seriesTagsPart(seriesTagIds: Seq[Long]): String =
if (seriesTagIds.isEmpty)
""
else
" (" + seriesTagIds.map(seriesTagId =>
s""""SeriesSeriesTags"."seriestagid" = $seriesTagId""")
.mkString(" or ") + ")"
def studiesGetResult = GetResult(r =>
Study(r.nextLong, r.nextLong, StudyInstanceUID(r.nextString), StudyDescription(r.nextString), StudyDate(r.nextString), StudyID(r.nextString), AccessionNumber(r.nextString), PatientAge(r.nextString)))
def studiesForPatient(startIndex: Long, count: Long, patientId: Long, sourceRefs: Seq[SourceRef], seriesTypeIds: Seq[Long], seriesTagIds: Seq[Long]): Future[Seq[Study]] = {
if (isWithAdvancedFiltering(sourceRefs, seriesTypeIds, seriesTagIds))
db.run {
implicit val getResult: GetResult[Study] = studiesGetResult
val basePart =
s"""select distinct("Studies"."id"),
"Studies"."patientId","Studies"."studyInstanceUID","Studies"."studyDescription","Studies"."studyDate","Studies"."studyID","Studies"."accessionNumber","Studies"."patientAge"
from "Series"
inner join "Studies" on "Series"."studyId" = "Studies"."id""""
val wherePart =
s"""
where
"Studies"."patientId" = $patientId"""
val query = basePart +
propertiesJoinPart(sourceRefs, seriesTypeIds, seriesTagIds) +
wherePart +
andPart(sourceRefs) +
sourcesPart(sourceRefs) +
andPart(seriesTypeIds) +
seriesTypesPart(seriesTypeIds) +
andPart(seriesTagIds) +
seriesTagsPart(seriesTagIds) +
pagePart(startIndex, count)
sql"#$query".as[Study]
}
else
metaDataDao.studiesForPatient(startIndex, count, patientId)
}
def seriesGetResult = GetResult(r =>
Series(r.nextLong, r.nextLong, SeriesInstanceUID(r.nextString), SeriesDescription(r.nextString), SeriesDate(r.nextString), Modality(r.nextString), ProtocolName(r.nextString), BodyPartExamined(r.nextString), Manufacturer(r.nextString), StationName(r.nextString), FrameOfReferenceUID(r.nextString)))
def seriesForStudy(startIndex: Long, count: Long, studyId: Long, sourceRefs: Seq[SourceRef], seriesTypeIds: Seq[Long], seriesTagIds: Seq[Long]): Future[Seq[Series]] = {
if (isWithAdvancedFiltering(sourceRefs, seriesTypeIds, seriesTagIds))
db.run {
implicit val getResult: GetResult[Series] = seriesGetResult
val basePart =
s"""select distinct("Series"."id"),
"Series"."studyId","Series"."seriesInstanceUID","Series"."seriesDescription","Series"."seriesDate","Series"."modality","Series"."protocolName","Series"."bodyPartExamined","Series"."manufacturer","Series"."stationName","Series"."frameOfReferenceUID"
from "Series""""
val wherePart =
s"""
where
"Series"."studyId" = $studyId"""
val query = basePart +
propertiesJoinPart(sourceRefs, seriesTypeIds, seriesTagIds) +
wherePart +
andPart(sourceRefs) +
sourcesPart(sourceRefs) +
andPart(seriesTypeIds) +
seriesTypesPart(seriesTypeIds) +
andPart(seriesTagIds) +
seriesTagsPart(seriesTagIds) +
pagePart(startIndex, count)
sql"#$query".as[Series]
}
else
metaDataDao.seriesForStudy(startIndex, count, studyId)
}
def addMetaData(patient: Patient, study: Study, series: Series, image: Image, source: Source): Future[MetaDataAdded] = {
val seriesSource = SeriesSource(-1, source)
val addAction =
patientsByNameAndIDAction(patient).headOption.flatMap { patientMaybe =>
patientMaybe.map { dbp =>
val updatePatient = patient.copy(id = dbp.id)
updatePatientAction(updatePatient).map(_ => (updatePatient, false))
}.getOrElse {
insertPatientAction(patient).map((_, true))
}
}.flatMap {
case (dbPatient, patientAdded) =>
studiesByUidAndPatientAction(study, dbPatient).headOption.flatMap { studyMaybe =>
studyMaybe.map { dbs =>
val updateStudy = study.copy(id = dbs.id, patientId = dbs.patientId)
updateStudyAction(updateStudy).map(_ => (updateStudy, false))
}.getOrElse {
insertStudyAction(study.copy(patientId = dbPatient.id)).map((_, true))
}
}.flatMap {
case (dbStudy, studyAdded) =>
seriesByUidAndStudyAction(series, dbStudy).headOption.flatMap { seriesMaybe =>
seriesMaybe.map { dbs =>
val updateSeries = series.copy(id = dbs.id, studyId = dbs.studyId)
updateSeriesAction(updateSeries).map(_ => (updateSeries, false))
}.getOrElse {
insertSeriesAction(series.copy(studyId = dbStudy.id)).map((_, true))
}
}.flatMap {
case (dbSeries, seriesAdded) =>
imagesByUidAndSeriesAction(image, dbSeries).headOption.flatMap { imageMaybe =>
imageMaybe.map { dbi =>
val updateImage = image.copy(id = dbi.id, seriesId = dbi.seriesId)
updateImageAction(updateImage).map(_ => (updateImage, false))
}.getOrElse {
insertImageAction(image.copy(seriesId = dbSeries.id)).map((_, true))
}
}.flatMap {
case (dbImage, imageAdded) =>
seriesSourcesByIdAction(dbSeries.id).headOption.flatMap { seriesSourceMaybe =>
seriesSourceMaybe.map { dbss =>
val updateSeriesSource = seriesSource.copy(id = dbss.id)
updateSeriesSourceAction(updateSeriesSource).map(_ => updateSeriesSource)
}.getOrElse {
insertSeriesSourceAction(seriesSource.copy(id = dbSeries.id))
}
}.map { dbSeriesSource =>
MetaDataAdded(dbPatient, dbStudy, dbSeries, dbImage,
patientAdded, studyAdded, seriesAdded, imageAdded,
dbSeriesSource.source)
}
}
}
}
}
db.run(addAction.transactionally)
}
def addSeriesTagToSeries(seriesTag: SeriesTag, seriesId: Long): Future[Option[SeriesTag]] = db.run {
seriesQuery.filter(_.id === seriesId).result.headOption
.map(_.map(_ => addAndInsertSeriesTagForSeriesIdAction(seriesTag, seriesId)))
.unwrap
}
}
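// Illustrative sketch (hypothetical, not from the upstream project): wiring the
// DAO against a Slick DatabaseConfig. The config key "slicebox.database" and the
// use of the global execution context are assumptions for illustration; the real
// application supplies its own.
object PropertiesDAOExample {
  import scala.concurrent.ExecutionContext.Implicits.global
  def main(args: Array[String]): Unit = {
    val dbConf = DatabaseConfig.forConfig[JdbcProfile]("slicebox.database")
    val dao = new PropertiesDAO(dbConf)
    val tags = dao.create().flatMap { _ =>
      dao.listSeriesTags(startIndex = 0, count = 10, orderBy = Some("name"), orderAscending = true, filter = None)
    }
    tags.foreach(ts => println(ts.map(_.name).mkString(", ")))
  }
}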
| slicebox/slicebox | src/main/scala/se/nimsa/sbx/metadata/PropertiesDAO.scala | Scala | apache-2.0 | 33,205 |
package org.me.hotel
class HotelTest extends UnitTest("Hotel") {
it should "forbid creating a Hotel with no rooms" in {
    Hotel() // constructing a Hotel with the default room list must not throw
an [IllegalArgumentException] should be thrownBy {
Hotel(rooms = List())
}
}
it should "forbid checking in if there are no free rooms" in {
val hotel = Hotel(List(Room(1).checkin(Guest("Victor"))))
an [IllegalArgumentException] should be thrownBy {
hotel.checkin("Fish")
}
}
it should "allow checking in" in {
val busyRooms = Hotel()
.checkin("Salvatore")
.rooms.filter(room => !room.isFree())
busyRooms should have size 1
    // the forall result must be asserted, otherwise the check is silently discarded
    busyRooms.forall(_.guest == Option("Salvatore")) should be (true)
}
} | rnowley/SonicScrewDriver | example/scala/unitTestExample/src/test/scala/org/me/hotel/HotelTest.scala | Scala | mit | 644 |
/*
* The MIT License (MIT)
* <p>
* Copyright (c) 2018
* <p>
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* <p>
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
* <p>
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package io.techcode.streamy.tcp.event
import java.net.InetSocketAddress
import akka.actor.DeadLetterSuppression
import io.techcode.streamy.tcp.component.TcpFlow
/**
* Tcp events.
*/
object TcpEvent {
// Marker interface for tcp events
sealed trait All extends DeadLetterSuppression
object Server {
/**
* This event is fired when a tcp server connection is created.
*
* @param localAddress Local IP Socket address.
* @param remoteAddress Remote IP Socket address.
*/
case class ConnectionCreated(localAddress: InetSocketAddress, remoteAddress: InetSocketAddress) extends All
/**
* This event is fired when a tcp server connection is closed.
*
* @param localAddress Local IP Socket address.
* @param remoteAddress Remote IP Socket address.
*/
case class ConnectionClosed(localAddress: InetSocketAddress, remoteAddress: InetSocketAddress) extends All
}
object Client {
/**
* This event is fired when a tcp client connection is created.
*
* @param config configuration of the tcp connection created.
*/
case class ConnectionCreated(config: TcpFlow.Client.Config) extends All
/**
* This event is fired when a tcp client connection is closed.
*
* @param config configuration of the tcp connection closed.
*/
case class ConnectionClosed(config: TcpFlow.Client.Config) extends All
}
}
| amannocci/streamy | plugin-tcp/src/main/scala/io/techcode/streamy/tcp/event/TcpEvent.scala | Scala | mit | 2,596 |
import scala.reflect.runtime.universe._
import scala.reflect.runtime.{universe => ru}
import scala.reflect.runtime.{currentMirror => cm}
import scala.tools.reflect.ToolBox
object foo {
class Expression {
override def toString = "Expression"
}
}
object Test extends dotty.runtime.LegacyApp {
val code = reify {
List(new foo.Expression, new foo.Expression)
};
val toolbox = cm.mkToolBox()
val evaluated = toolbox.eval(code.tree)
println("evaluated = " + evaluated)
}
| yusuke2255/dotty | tests/disabled/macro/run/reify_typerefs_3a.scala | Scala | bsd-3-clause | 490 |
val t: String => String = _.toUpperCase
t(/*caret*/)
//v1: String | triggerNZ/intellij-scala | testdata/parameterInfo/functionParameterInfo/functionType/FunctionType.scala | Scala | apache-2.0 | 66 |
package org.yotchang4s.pixiv.novel
trait NovelComponent {
val novel: NovelRepository
trait NovelRepository {
}
} | yotchang4s/yapix | src/org/yotchang4s/pixiv/novel/NovelComponent.scala | Scala | bsd-3-clause | 120 |
package me.rjfarmer.rlh.retriever
import akka.actor._
import me.rjfarmer.rlh.api.HasTimestamp
import me.rjfarmer.rlh.cache.EhcCache
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}
object Collector {
def props[K, V <: HasTimestamp](cache: EhcCache[K,V], cached: Map[K,V], numItems: Int, replyTo: ActorRef, timeout: FiniteDuration): Props = {
Props(new Collector(cache, cached, numItems, replyTo, timeout))
}
case object Timeout
case class Result[K, V](item: Retrievable[K],
result: Try[V]) {
def pair: (K,V) = (item.key, result.get)
}
}
/** Collect a number of results or return incomplete result after timeout */
class Collector[K,V <: HasTimestamp] (cache: EhcCache[K,V], cached: Map[K,V], numResults: Int, replyTo: ActorRef, timeout: FiniteDuration)
extends Actor with ActorLogging {
type TResult = Collector.Result[K,V]
var received: Map[K,V] = Map()
var numErrors = 0
var replied = false
override def preStart(): Unit = {
import scala.concurrent.ExecutionContext.Implicits.global
super.preStart()
context.system.scheduler.scheduleOnce(timeout, self, Collector.Timeout)
}
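  // One Result message is expected per retrieved item; once all results (or errors) have arrived,
  // or the timeout fires, the merged map of cached + received values is sent to replyTo.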
override def receive: Receive = {
case result: TResult =>
if (! replied) {
// log.debug("received: {}", result)
result.result match {
case Success(v) =>
received += result.pair
case Failure(ex) =>
numErrors += 1
}
if (received.size + numErrors == numResults) {
finish(false)
}
}
case Collector.Timeout =>
// XXX actor is only destroyed after timeout expires ...
finish(true)
case msg =>
log.warning("unknown message: {}", msg)
}
def finish(poison: Boolean): Unit = {
if (! replied) {
val result = cached ++ received
replyTo ! result
replied = true
}
if (poison) {
self ! PoisonPill
}
}
}
| random-j-farmer/little-helper | app/jvm/src/main/scala/me/rjfarmer/rlh/retriever/Collector.scala | Scala | mit | 1,969 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump.streaming.state.impl
import org.apache.gearpump.Time.MilliSeconds
import org.apache.gearpump.streaming.transaction.api.{CheckpointStore, CheckpointStoreFactory}
/**
* an in memory store provided for test
* should not be used in real cases
*/
class InMemoryCheckpointStore extends CheckpointStore {
private var checkpoints = Map.empty[MilliSeconds, Array[Byte]]
override def persist(timestamp: MilliSeconds, checkpoint: Array[Byte]): Unit = {
checkpoints += timestamp -> checkpoint
}
override def recover(timestamp: MilliSeconds): Option[Array[Byte]] = {
checkpoints.get(timestamp)
}
override def close(): Unit = {
checkpoints = Map.empty[MilliSeconds, Array[Byte]]
}
}
class InMemoryCheckpointStoreFactory extends CheckpointStoreFactory {
override def getCheckpointStore(name: String): CheckpointStore = {
new InMemoryCheckpointStore
}
}
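// Minimal usage sketch (illustrative only; this example object is not part of the original file,
// and it assumes MilliSeconds timestamps can be written as Long literals):
object InMemoryCheckpointStoreExample {
  def main(args: Array[String]): Unit = {
    val store = new InMemoryCheckpointStoreFactory().getCheckpointStore("example")
    store.persist(1000L, Array[Byte](1, 2, 3))
    // recover returns Some(bytes) for a persisted timestamp and None otherwise
    assert(store.recover(1000L).exists(_.sameElements(Array[Byte](1, 2, 3))))
    store.close()
  }
}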
| manuzhang/incubator-gearpump | streaming/src/main/scala/org/apache/gearpump/streaming/state/impl/InMemoryCheckpointStore.scala | Scala | apache-2.0 | 1,717 |
package util
import org.joda.time.DateTime
object Joda {
implicit def dateTimeOrdering: Ordering[DateTime] = Ordering.fromLessThan(_ isBefore _)
}
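// Illustrative sketch (not part of the original file): with the implicit ordering imported,
// standard collection operations such as sorted, min and max work on DateTime values.
object JodaOrderingExample {
  import Joda._
  def earliest(times: Seq[DateTime]): DateTime = times.min
}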
| metaxmx/FridayNightBeer | modules/datamodel/src/main/scala/util/Joda.scala | Scala | apache-2.0 | 164 |
/**
* ---------------------------------------------------------------------------
*
* Copyright (c) 2011 Dan Simpson
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
* ---------------------------------------------------------------------------
*/
package org.ds.satchel
import org.scalatest._
import org.scalatest.matchers.ShouldMatchers
class ServletSpec extends FlatSpec with ShouldMatchers {
"A satchel servlet" should "do stuff" in {
}
} | dansimpson/satchel | scalatra/src/test/scala/org/ds/satchel/ServletSpec.scala | Scala | mit | 1,484 |
package org.helgoboss.scala_osgi_metatype
/**
* Contains adapters which translate the Scala OSGi Metatype interfaces to the ones defined in the native OSGi API.
*/
package object adapters {
}
| helgoboss/scala-osgi-metatype | src/main/scala/org/helgoboss/scala_osgi_metatype/adapters/package.scala | Scala | mit | 196 |
package com.twitter.util
import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
import java.util.zip.{GZIPInputStream, GZIPOutputStream}
import org.apache.commons.codec.binary.Base64
import com.twitter.io.StreamIO
trait StringEncoder {
def encode(bytes: Array[Byte]): String = new String(bytes)
def decode(str: String): Array[Byte] = str.getBytes
}
trait Base64StringEncoder extends StringEncoder {
private[this] def codec = new Base64()
override def encode(bytes: Array[Byte]): String = {
codec.encodeToString(bytes)
}
override def decode(str: String): Array[Byte] =
codec.decode(str)
}
object StringEncoder extends StringEncoder
object Base64StringEncoder extends Base64StringEncoder
/**
* A collection of utilities for encoding strings and byte arrays to and decoding from strings
* compressed from with gzip.
*
* This trait is thread-safe because there are no streams shared outside of method scope, and
* therefore no contention for shared byte arrays.
*
* The encoding for strings is UTF-8.
*
* gzipping inherently includes base64 encoding (the GZIP utilities from java will complain
* otherwise!)
*/
trait GZIPStringEncoder extends StringEncoder {
override def encode(bytes: Array[Byte]): String = {
val baos = new ByteArrayOutputStream
val gos = new GZIPOutputStream(baos)
gos.write(bytes)
gos.finish()
Base64StringEncoder.encode(baos.toByteArray)
}
def encodeString(str: String) = encode(str.getBytes("UTF-8"))
override def decode(str: String): Array[Byte] = {
val baos = new ByteArrayOutputStream
StreamIO.copy(new GZIPInputStream(new ByteArrayInputStream(Base64StringEncoder.decode(str))), baos)
baos.toByteArray
}
def decodeString(str: String): String = new String(decode(str), "UTF-8")
}
object GZIPStringEncoder extends GZIPStringEncoder
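// Minimal usage sketch (illustrative; this example object is not part of the original API):
// round-trips a string through gzip + base64 using the encoder defined above.
object GZIPStringEncoderExample {
  def main(args: Array[String]): Unit = {
    val original = "some text that compresses well because it repeats, repeats, repeats"
    val packed = GZIPStringEncoder.encodeString(original)
    val restored = GZIPStringEncoder.decodeString(packed)
    assert(restored == original) // the round trip preserves the UTF-8 string
  }
}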
| luciferous/util | util-codec/src/main/scala/com/twitter/util/StringEncoder.scala | Scala | apache-2.0 | 1,847 |
package haru.dao
import scala.slick.driver.MySQLDriver.simple._
import scala.slick.lifted.ProvenShape
import scala.slick.lifted.Tag
import java.sql.SQLIntegrityConstraintViolationException
import scala.slick.jdbc.{ GetResult, StaticQuery => Q }
import Q.interpolation
object WebHookDao extends DatabasePool {
val Latestqnrs_table: TableQuery[Latestqnrs] = TableQuery[Latestqnrs]
case class Latestqnr(id: Option[Int], applicationid:String, messagetype: String, content: String)
class Latestqnrs(tag: Tag) extends Table[Latestqnr](tag, "Latestqnr") {
def id = column[Int]("ID", O.PrimaryKey, O.AutoInc) // This is the primary key column
def applicationid = column[String]("APPLICATIONID")
def messagetype = column[String]("MESSAGETYPE")
def content = column[String]("CONTENT")
def * = (id.?, applicationid, messagetype, content) <> (Latestqnr.tupled, Latestqnr.unapply)
}
def insertLatestQnR(applicationid:String, messagetype : String, content:String): Int = databasePool withSession {
implicit session =>
      // if a project with the same name already exists, handle it as an error..
Latestqnrs_table += Latestqnr(None, applicationid, messagetype, content)
}
def getRecentLatestQnR(applicationid: String): List[Map[String, Any]] = databasePool withSession {
implicit session =>
val query = sql"""
select messagetype, content, UNIX_TIMESTAMP(createdat)
from Latestqnr
where applicationid = $applicationid
order by createdat desc limit 0, 5
""".as[(String, String, Long)]
      val latestqnrlist = query.list
      latestqnrlist.map { case (messagetype, content, time) =>
        Map[String, Any]("messagetype" -> messagetype, "content" -> content, "time" -> time)
      }
}
} | haruio/haru-admin | src/main/scala/haru/dao/WebHookDao.scala | Scala | mit | 1,819 |
package org.littlewings.javaee7
import java.io.File
import org.apache.catalina.startup.Tomcat
import org.apache.tomcat.util.descriptor.web.ContextResource
import org.scalatest.{BeforeAndAfterAll, Suite}
trait EmbeddedTomcatCdiSupport extends Suite with BeforeAndAfterAll {
protected val port: Int = 8080
protected val tomcat: Tomcat = new Tomcat
protected val baseDir: File = createTempDir("tomcat", port)
protected val docBaseDir: File = createTempDir("tomcat-docbase", port)
override def beforeAll(): Unit = {
tomcat.setPort(port)
tomcat.setBaseDir(baseDir.getAbsolutePath)
val context =
tomcat.addWebapp("", docBaseDir.getAbsolutePath)
context.addParameter("org.jboss.weld.environment.servlet.archive.isolation", "false")
context.addParameter("resteasy.injector.factory", "org.jboss.resteasy.cdi.CdiInjectorFactory")
tomcat.enableNaming()
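    // JNDI naming must be enabled so the BeanManager resource registered below can be bound and
    // looked up by Weld/CDI inside the embedded Tomcat.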
val resource = new ContextResource
resource.setAuth("Container")
resource.setName("BeanManager")
resource.setType("javax.enterprise.inject.spi.BeanManager")
resource.setProperty("factory", "org.jboss.weld.resources.ManagerObjectFactory")
context.getNamingResources.addResource(resource)
tomcat.start()
}
override def afterAll(): Unit = {
tomcat.stop()
tomcat.destroy()
deleteDirs(baseDir)
deleteDirs(docBaseDir)
}
private def createTempDir(prefix: String, port: Int): File = {
val tempDir = File.createTempFile(s"${prefix}.", s".${port}")
tempDir.delete()
tempDir.mkdir()
tempDir.deleteOnExit()
tempDir
}
private def deleteDirs(file: File): Unit = {
file
.listFiles
.withFilter(f => f.getName != "." && f.getName != "..")
.foreach {
case d if d.isDirectory => deleteDirs(d)
case f => f.delete()
}
file.delete()
}
}
| kazuhira-r/javaee7-scala-examples | cdi-programmatic-lookup/src/test/scala/org/littlewings/javaee7/EmbeddedTomcatCdiSupport.scala | Scala | mit | 1,832 |
/*
* This file is part of the "silex" library of helpers for Apache Spark.
*
* Copyright (c) 2016 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package com.redhat.et.silex.rdd.split
import scala.reflect.ClassTag
import org.apache.spark.storage.StorageLevel
import org.apache.spark.rdd.RDD
import org.apache.spark.Logging
/**
* Enhances RDDs with methods for splitting RDDs based on predicates or other functions
*/
class SplitRDDFunctions[T :ClassTag](self: RDD[T]) extends Logging with Serializable {
import scala.collection.mutable.ArrayBuffer
import com.redhat.et.silex.rdd.multiplex.implicits._
import SplitRDDFunctions.defaultSL
/**
* Split an RDD into two output RDDs, using a predicate function
* @param f The predicate function to split with
* @param persist The storage level to use for the intermediate result.
* @return A pair of RDDs. The first output contains rows for which the predicate was true, and
* the second contains rows for which the predicate was false.
*/
def splitFilter(f: T => Boolean,
persist: StorageLevel = defaultSL): (RDD[T], RDD[T]) = {
self.flatMux2Partitions((data: Iterator[T]) => {
val (pass, fail) = (ArrayBuffer.empty[T], ArrayBuffer.empty[T])
data.foreach { e => (if (f(e)) pass else fail) += e }
(pass, fail)
}, persist)
}
/**
* Split an RDD into two output RDDs, using a function that returns an Either[L, R]
* @param f A function that returns an Either[L, R]
* @param persist The storage level to use for the intermediate result.
* @return A pair of RDDs. The first output contains rows for which the function output was Left[L],
* and the second contains rows for which the function output was Right[R]
*/
def splitEither[L :ClassTag, R :ClassTag](f: T => Either[L, R],
persist: StorageLevel = defaultSL): (RDD[L], RDD[R]) = {
self.flatMux2Partitions((data: Iterator[T]) => {
val (left, right) = (ArrayBuffer.empty[L], ArrayBuffer.empty[R])
data.foreach { e => f(e).fold(lv => left += lv, rv => right += rv) }
(left, right)
}, persist)
}
}
/** Definitions used by the SplitRDDFunctions instances */
object SplitRDDFunctions {
/** The default storage level used for intermediate splitting results */
val defaultSL = StorageLevel.MEMORY_ONLY
}
/** Implicit conversions to enhance RDDs with splitting methods */
object implicits {
import scala.language.implicitConversions
implicit def splitRDDFunctions[T :ClassTag](rdd: RDD[T]): SplitRDDFunctions[T] =
new SplitRDDFunctions(rdd)
}
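// Illustrative sketch (not part of the original file): with the implicit conversion above in scope,
// an RDD can be split into two RDDs, here its even and odd elements, in one pass per partition.
object SplitRDDFunctionsExample {
  import org.apache.spark.rdd.RDD
  import com.redhat.et.silex.rdd.split.implicits._
  def evensAndOdds(nums: RDD[Int]): (RDD[Int], RDD[Int]) =
    nums.splitFilter(_ % 2 == 0)
}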
| erikerlandson/silex | src/main/scala/com/redhat/et/silex/rdd/split.scala | Scala | apache-2.0 | 3,098 |
object SCL9473 {
trait Sys[S <: Sys[S]] {
type I
def foo(tx: Any): Int
}
def prepare[S <: Sys[S], I1 <: Sys[I1]](system: S { type I = I1 }): Any = {
/*start*/system.foo(123)/*end*/
}
}
//Int | ilinum/intellij-scala | testdata/typeInference/bugs5/SCL9473.scala | Scala | apache-2.0 | 212 |
package org.jetbrains.plugins.scala
package codeInspection.booleans
import org.jetbrains.plugins.scala.base.ScalaLightCodeInsightFixtureTestAdapter
/**
* Nikolay.Tropin
* 4/24/13
*/
class SimplifyBooleanInspectionTest extends ScalaLightCodeInsightFixtureTestAdapter {
val s = ScalaLightCodeInsightFixtureTestAdapter.SELECTION_START
val e = ScalaLightCodeInsightFixtureTestAdapter.SELECTION_END
val annotation = "Simplify boolean expression"
private def check(text: String) {
checkTextHasError(text, annotation, classOf[SimplifyBooleanInspection])
}
private def testFix(text: String, result: String, hint: String) {
testQuickFix(text.replace("\\r", ""), result.replace("\\r", ""), hint, classOf[SimplifyBooleanInspection])
}
private def checkHasNoErrors(text: String) {
checkTextHasNoErrors(text, annotation, classOf[SimplifyBooleanInspection])
}
def test_NotTrue() {
val selectedText = s"$s!true$e"
check(selectedText)
val text = "!true"
val result = "false"
val hint = "Simplify !true"
testFix(text, result, hint)
}
def test_TrueEqualsA() {
val selectedText =
s"""val a = true
|${s}true == a$e""".stripMargin
check(selectedText)
val text =
"""val a = true
|true == a""".stripMargin
val result = """val a = true
|a""".stripMargin
val hint = "Simplify true == a"
testFix(text, result, hint)
}
def test_TrueAndA() {
val selectedText =
s"""val a = true
|${s}true && a$e""".stripMargin
check(selectedText)
val text =
"""val a = true
|true && a""".stripMargin
val result = """val a = true
|a""".stripMargin
val hint = "Simplify true && a"
testFix(text, result, hint)
}
def test_AOrFalse() {
val selectedText = s"""val a = true
|${s}a | false$e""".stripMargin
check(selectedText)
val text = """val a = true
|a | false""".stripMargin
val result = """val a = true
|a""".stripMargin
val hint = "Simplify a | false"
testFix(text, result, hint)
}
def test_ExternalExpression() {
val selectedText = s"""
|val a = true
|${s}true && (a || false)$e
""".stripMargin
check(selectedText)
val text = s"""
|val a = true
|true && (a || false)""".stripMargin
val result = """
|val a = true
|a""".stripMargin
val hint = "Simplify true && (a || false)"
testFix(text, result, hint)
}
def test_InternalExpression() {
val selectedText =
s"""
|val a = true
|true && ($s<caret>a || false$e)
""".stripMargin
check(selectedText)
val text = s"""
|val a = true
|true && (<caret>a || false)
""".stripMargin
val result = s"""
|val a = true
|true && a
""".stripMargin
val hint = "Simplify a || false"
testFix(text, result, hint)
}
def test_TrueNotEqualsA() {
val selectedText = s"""val a = true
|val flag: Boolean = ${s}true != a$e""".stripMargin
check(selectedText)
val text = s"""val a = true
|val flag: Boolean = true != a""".stripMargin
val result = """val a = true
|val flag: Boolean = !a""".stripMargin
val hint = "Simplify true != a"
testFix(text, result, hint)
}
def test_SimplifyInParentheses() {
val selectedText = s"""val a = true
|!(${s}true != a$e)""".stripMargin
check(selectedText)
val text = """val a = true
|!(true != a)""".stripMargin
val result = """val a = true
|!(!a)""".stripMargin
val hint = "Simplify true != a"
testFix(text, result, hint)
}
def test_TrueAsAny() {
val text =
"""
|def trueAsAny: Any = {
| true
|}
|if (trueAsAny == true) {
| println("true")
|} else {
| println("false")
|}
|
""".stripMargin.replace("\\r", "").trim
checkHasNoErrors(text)
}
}
| LPTK/intellij-scala | test/org/jetbrains/plugins/scala/codeInspection/booleans/SimplifyBooleanInspectionTest.scala | Scala | apache-2.0 | 4,149 |
package HourRank_6
import utils.SetInt
/**
* Created by yujieshui on 2016/3/3.
*/
object KittyAndKatty {
def readSeqInt() = io.StdIn.readLine().split(" ").toList.map(_.toInt)
type User = Boolean
object Gamer extends Enumeration {
val kitty = Value("Kitty")
val katty = Value("Katty")
}
import Gamer._
def play(list: Int): Gamer.Value = {
val sum = (1 to list).sum
val lastUser = if (list % 2 == 0) kitty else katty
sum % 3 match {
case 1 => kitty
case 2 => katty
case 0 => lastUser
}
}
def main(args: Array[String]) {
val n :: Nil = readSeqInt()
val data = 1 to n map (_ => readSeqInt().head)
val out = (data map play)
    println(
      (data, out, right).zipped
        .map { case (d, o, r) => (d, o, r, o.toString == r) }
        .filter(_._4 == false)
    )
}
val right =List(
"Kitty",
"Katty",
"Katty",
"Katty",
"Katty",
"Kitty",
"Kitty",
"Katty",
"Kitty",
"Kitty",
"Kitty",
"Kitty",
"Katty",
"Kitty",
"Kitty",
"Katty",
"Katty",
"Kitty",
"Katty",
"Katty",
"Katty",
"Katty",
"Kitty",
"Kitty",
"Katty",
"Katty",
"Kitty",
"Katty",
"Kitty",
"Katty",
"Kitty",
"Katty",
"Katty",
"Katty",
"Kitty",
"Kitty",
"Kitty",
"Katty",
"Katty",
"Kitty",
"Katty",
"Katty",
"Katty",
"Kitty",
"Kitty",
"Kitty",
"Katty",
"Katty",
"Katty",
"Kitty",
"Kitty",
"Katty",
"Kitty",
"Katty",
"Kitty",
"Kitty",
"Kitty",
"Katty",
"Kitty",
"Kitty",
"Katty",
"Katty",
"Katty",
"Kitty",
"Kitty",
"Katty",
"Kitty",
"Kitty",
"Kitty",
"Kitty",
"Katty",
"Katty",
"Katty",
"Katty",
"Katty",
"Katty",
"Katty",
"Kitty",
"Kitty",
"Katty",
"Kitty",
"Kitty",
"Kitty",
"Kitty",
"Katty",
"Katty",
"Kitty",
"Katty",
"Kitty",
"Katty",
"Kitty",
"Katty",
"Katty",
"Katty",
"Katty",
"Kitty",
"Katty",
"Katty",
"Kitty",
"Kitty"
)
// ".stripMargin.split("\n").toList
SetInt(
"""100
|272
|153
|933
|733
|191
|10
|402
|29
|922
|408
|102
|366
|245
|514
|882
|807
|345
|704
|631
|299
|517
|675
|714
|114
|3
|3
|964
|299
|650
|489
|434
|3
|369
|885
|408
|732
|28
|677
|407
|774
|529
|157
|651
|906
|110
|812
|299
|595
|731
|328
|382
|661
|610
|789
|408
|742
|218
|191
|974
|678
|717
|891
|339
|672
|326
|29
|272
|480
|180
|336
|3
|489
|393
|299
|967
|345
|75
|838
|858
|623
|258
|272
|720
|844
|585
|273
|604
|795
|758
|299
|240
|299
|897
|299
|361
|696
|855
|29
|66
|812
""".stripMargin)
}
/*
100
272 Kitty
153 Katty
933 Katty
733 Katty
191 Katty
10 Kitty
402 Kitty
29 Katty
922 Kitty
408 Kitty
102 Kitty
366 Kitty
245 Katty
514 Kitty
882 Kitty
807 Katty
345 Katty
704 Kitty
631 Katty
299 Katty
517 Katty
675 Katty
714 Kitty
114 Kitty
3 Katty
3 Katty
964 Kitty
299 Katty
650 Kitty
489 Katty
434 Kitty
3 Katty
369 Katty
885 Katty
408 Kitty
732 Kitty
28 Kitty
677 Katty
407 Katty
774 Kitty
529 Katty
157 Katty
651 Katty
906 Kitty
110 Kitty
812 Kitty
299 Katty
595 Katty
731 Katty
328 Kitty
382 Kitty
661 Katty
610 Kitty
789 Katty
408 Kitty
742 Kitty
218 Kitty
191 Katty
974 Kitty
678 Kitty
717 Katty
891 Katty
339 Katty
672 Kitty
326 Kitty
29 Katty
272 Kitty
480 Kitty
180 Kitty
336 Kitty
3 Katty
489 Katty
393 Katty
299 Katty
967 Katty
345 Katty
75 Katty
838 Kitty
858 Kitty
623 Katty
258 Kitty
272 Kitty
720 Kitty
844 Kitty
585 Katty
273 Katty
604 Kitty
795 Katty
758 Kitty
299 Katty
240 Kitty
299 Katty
897 Katty
299 Katty
361 Katty
696 Kitty
855 Katty
29 Katty
66 Kitty
812 Kitty
*/
| 1178615156/hackerrank | src/main/scala/HourRank_6/KittyAndKatty.scala | Scala | apache-2.0 | 4,725 |
package capitulo09
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.FunSuite
import scala.io.Source
/**
 * To read all the lines of a file,
 * call the getLines method of a
 * scala.io.Source object
*/
@RunWith(classOf[JUnitRunner])
class LendoLinhasArquivo extends FunSuite{
test("lendo linhas do arquivo myfile.txt"){
val source = Source.fromFile("src/test/resources/myfile.txt","UTF-8")
    //the first argument can be a string or
    //a java.io.File
    //you can omit the encoding if you know that
    //the file uses the platform's default encoding
    //the result is an Iterator
val lineIterator = source.getLines;
    //you can process the lines one at a time
var s = ""
for (l <- lineIterator) s += l + " "
assert(s == "Linha 1 Linha 2 Linha 3 ")
source.close;
val source2 = Source.fromFile("src/test/resources/myfile.txt","UTF-8")
    //or you can put the lines into an array or array buffer by
    //applying the toArray or toBuffer methods to the Iterator
val lines = source2.getLines.toArray
assert(lines.length == 3)
source2.close;
    //in some situations you just want to read the
    //whole file into a string
val source3 = Source.fromFile("src/test/resources/myfile.txt","UTF-8")
val contents = source3.mkString
assert("Linha 1\\nLinha 2\\nLinha 3" == contents)
source3.close;
}
} | celioeduardo/scala-impatient | src/test/scala/capitulo09/LeituraLinhasArquivo.scala | Scala | mit | 1,469 |
package keystoneml.nodes.util
import breeze.linalg.DenseVector
import org.apache.spark.rdd.RDD
import keystoneml.pipelines._
import keystoneml.workflow.Transformer
/**
* Given a class label, returns a binary vector that indicates when that class is present.
*
* Expects labels in the range [0, numClasses) and numClasses > 1.
*
* @param numClasses
*/
case class ClassLabelIndicatorsFromIntLabels(numClasses: Int)
extends Transformer[Int, DenseVector[Double]] {
assert(numClasses > 1, "numClasses must be > 1.")
def apply(in: Int): DenseVector[Double] = {
if(in < 0 || in >= numClasses) {
throw new RuntimeException("Class labels are expected to be in the range [0, numClasses)")
}
val indicatorVector = DenseVector.fill(numClasses, -1.0)
indicatorVector(in) = 1.0
indicatorVector
}
}
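// Example (illustrative, not part of the original file): with numClasses = 3, label 1 maps to
//   ClassLabelIndicatorsFromIntLabels(3).apply(1) == DenseVector(-1.0, 1.0, -1.0)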
/**
* Given a set of class labels, returns a binary vector that indicates when each class is present.
*
* Expects labels in the range [0, numClasses) and numClasses > 1.
*
* @param numClasses
*/
case class ClassLabelIndicatorsFromIntArrayLabels(numClasses: Int, validate: Boolean = false)
extends Transformer[Array[Int], DenseVector[Double]] {
assert(numClasses > 1, "numClasses must be > 1.")
def apply(in: Array[Int]): DenseVector[Double] = {
if(validate && (in.max >= numClasses || in.min < 0)) {
throw new RuntimeException("Class labels are expected to be in the range [0, numClasses)")
}
val indicatorVector = DenseVector.fill(numClasses, -1.0)
var i = 0
while (i < in.length) {
indicatorVector(in(i)) = 1.0
i += 1
}
indicatorVector
}
} | amplab/keystone | src/main/scala/keystoneml/nodes/util/ClassLabelIndicators.scala | Scala | apache-2.0 | 1,638 |
package com.sksamuel.avro4s.schema
import com.sksamuel.avro4s.AvroSchema
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
class PrimitiveSchemaTest extends AnyWordSpec with Matchers {
"SchemaEncoder" should {
"support top level Booleans" in {
val expected = new org.apache.avro.Schema.Parser().parse(getClass.getResourceAsStream("/top_level_boolean.json"))
val schema = AvroSchema[Boolean]
schema.toString(true) shouldBe expected.toString(true)
}
"support top level Longs" in {
val expected = new org.apache.avro.Schema.Parser().parse(getClass.getResourceAsStream("/top_level_long.json"))
val schema = AvroSchema[Long]
schema.toString(true) shouldBe expected.toString(true)
}
"support top level Integers" in {
val expected = new org.apache.avro.Schema.Parser().parse(getClass.getResourceAsStream("/top_level_integer.json"))
val schema = AvroSchema[Int]
schema.toString(true) shouldBe expected.toString(true)
}
"support top level Strings" in {
val expected = new org.apache.avro.Schema.Parser().parse(getClass.getResourceAsStream("/top_level_string.json"))
val schema = AvroSchema[String]
schema.toString(true) shouldBe expected.toString(true)
}
"support top level Floats" in {
val expected = new org.apache.avro.Schema.Parser().parse(getClass.getResourceAsStream("/top_level_float.json"))
val schema = AvroSchema[Float]
schema.toString(true) shouldBe expected.toString(true)
}
"support top level Doubles" in {
val expected = new org.apache.avro.Schema.Parser().parse(getClass.getResourceAsStream("/top_level_double.json"))
val schema = AvroSchema[Double]
schema.toString(true) shouldBe expected.toString(true)
}
}
}
| sksamuel/avro4s | avro4s-core/src/test/scala/com/sksamuel/avro4s/schema/PrimitiveSchemaTest.scala | Scala | apache-2.0 | 1,817 |
package com.ovoenergy.comms.monitor
package metrics
import fs2._
import cats._, implicits._
import com.ovoenergy.comms.model.Feedback
import com.ovoenergy.comms.logging.core.Logger
import model._
object CommsStatus {
def pipeline[F[_]: Monad: Logger: Reporter](msg: Feedback): Stream[F, Unit] =
getInfo(msg).evalMap {
case (status, template, channel) =>
def statusLabel = status match {
case Status.Failed => "failed"
case Status.Delivered => "delivered"
}
val tags = Map(
"status" -> statusLabel,
"template-id" -> template.id.value,
"template-version" -> template.version
) ++ channel.foldMap { c =>
Map("channel" -> c.toString.toLowerCase)
}
Reporter[F].counter("messages.count", tags).flatMap(_.increment)
}
def getInfo[F[_]](msg: Feedback)(
implicit log: Logger[F]): Stream[F, (Status, Template, Option[Channel])] = {
if (msg.templateManifest.isDefined) Stream.emit(msg)
else
Stream.eval_ {
log.warn(EventId(msg.metadata.eventId))(
"Discarded: this message does not have a template manifest, which is a violation of the contract")
}
}.map { msg =>
(
Status.fromFeedback(msg.status),
msg.templateManifest.map(Template.fromManifest),
msg.channel.flatMap(Channel.removePhone).some
).tupled
}.unNone
}
| ovotech/comms-monitor-service | src/main/scala/com/ovoenergy/comms/monitor/metrics/CommsStatus.scala | Scala | mit | 1,410 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package views.js
import play.api.libs.json.Writes
import play.api.libs.json.Json
import play.twirl.api.JavaScript
/**
* Contains helpers intended to be used in JavaScript templates
*/
package object helper {
/**
* Generates a JavaScript value from a Scala value.
*
* {{{
* @(username: String)
* alert(@helper.json(username));
* }}}
*
* @param a The value to convert to JavaScript
* @return A JavaScript value
*/
def json[A: Writes](a: A): JavaScript = JavaScript(Json.stringify(Json.toJson(a)))
}
| playframework/playframework | core/play/src/main/scala/views/js/helper/package.scala | Scala | apache-2.0 | 608 |
package one.lockstep.vault
import one.lockstep.lock.client.LockErr.Cancelled
import one.lockstep.lock.client._
import one.lockstep.util.Bytes
import scala.concurrent.duration.Duration
import scala.concurrent._
private class OnlineOperationFutureAdapter[A](preparing: Future[OnlineOperation[A]])
(implicit ec: ExecutionContext) extends OnlineOperation[A] {
private val promise = Promise[OnlineOperation[A]]()
preparing.onComplete { triedOnlineOp =>
if (!promise.tryComplete(triedOnlineOp)) {
// the operation was already cancelled by the user,
// propagate the cancellation to the underlying operation (which will not be executed anyway)
triedOnlineOp.foreach(_.cancel())
}
}
override def execute(passcode: Bytes, timeout: Duration): Future[A] = promise.future.flatMap {
onlineOp => onlineOp.execute(passcode, timeout)
}
def cancel(): Boolean = {
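    // If preparation has not completed yet, failing the promise cancels early; otherwise the
    // cancellation is delegated to the already-prepared underlying operation.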
def earlyCancel() = promise.tryFailure(new LockException(Cancelled))
def lateCancel() = promise.future.value.get.map(onlineOp => onlineOp.cancel()).getOrElse(false)
earlyCancel() || lateCancel()
}
}
| lockstep-one/vault | vault-client/src/main/scala/one/lockstep/vault/OnlineOperationFutureAdapter.scala | Scala | agpl-3.0 | 1,154 |
package com.twitter.finagle.memcached.stress
import com.twitter.finagle.builder.ClientBuilder
import com.twitter.finagle.memcached.protocol._
import com.twitter.finagle.memcached.protocol.text.Memcached
import com.twitter.finagle.memcached.Server
import com.twitter.finagle.memcached.util.ChannelBufferUtils._
import com.twitter.finagle.Service
import com.twitter.util.{Await, Time}
import java.net.InetSocketAddress
import org.specs.SpecificationWithJUnit
class InterpreterServiceSpec extends SpecificationWithJUnit {
"InterpreterService" should {
var server: Server = null
var client: Service[Command, Response] = null
doBefore {
server = new Server(new InetSocketAddress(0))
val address = server.start().localAddress
client = ClientBuilder()
.hosts(address)
.codec(new Memcached)
.hostConnectionLimit(1)
.build()
}
doAfter {
server.stop()
}
"set & get" in {
val _key = "key"
val value = "value"
val zero = "0"
val start = System.currentTimeMillis
(0 until 100) map { i =>
val key = _key + i
Await.result(client(Delete(key)))
Await.result(client(Set(key, 0, Time.epoch, value)))
Await.result(client(Get(Seq(key)))) mustEqual Values(Seq(Value(key, value, None, Some(zero))))
}
val end = System.currentTimeMillis
// println("%d ms".format(end - start))
}
}
}
| stevegury/finagle | finagle-memcached/src/test/scala/com/twitter/finagle/memcached/stress/InterpreterServiceSpec.scala | Scala | apache-2.0 | 1,438 |
package breeze.maxent
import breeze.util.Index
import breeze.util.Encoder
import breeze.util.Profiling
import breeze.optimize.{FirstOrderMinimizer, DiffFunction}
import breeze.linalg._
import breeze.collection.mutable.SparseArray
/**
*
* @author dlwh
*/
abstract class MaxEntObjectiveFunction extends DiffFunction[DenseVector[Double]] {
type Context
type Decision
type Feature
val contextIndex: Index[Context]
val decisionIndex: Index[Decision]
val indexedDecisionsForContext:IndexedSeq[IndexedSeq[Int]]
protected def features(d: Decision, c: Context):IndexedSeq[Feature]
protected def initialValueForFeature(f: Feature):Double
// (Context -> Decision -> log p(decision|context)) => (log prob, expected count of (context,decision)
protected def expectedCounts(logThetas: IndexedSeq[Vector[Double]]):(Double,IndexedSeq[Vector[Double]])
lazy val contextBroker = Encoder.fromIndex(contextIndex)
protected lazy val decisionBroker = Encoder.fromIndex(decisionIndex)
// feature grid is contextIndex -> decisionIndex -> Seq[feature index]
lazy val (featureIndex: Index[Feature], featureGrid: Array[SparseArray[Array[Int]]]) = {
val index = Index[Feature]()
val grid = contextBroker.fillArray(decisionBroker.mkSparseArray[Array[Int]])
for{
cI <- 0 until contextIndex.size
c = contextIndex.get(cI)
dI <- indexedDecisionsForContext(cI)
} {
val d = decisionIndex.get(dI)
val f = features(d,c)
Profiling.time(c + " " + d + f.size) {
if(!f.isEmpty) {
grid(cI)(dI) = f.map(index.index).toArray.sorted
}
}
}
(index,grid:Array[SparseArray[Array[Int]]])
}
lazy val featureEncoder = Encoder.fromIndex(featureIndex)
lazy val defaultInitWeights = Counter(featureIndex.map{ f => (f,initialValueForFeature(f) + math.log(.02 * math.random + 0.99))})
lazy val encodedInitialWeights = featureEncoder.encodeDense(defaultInitWeights)
protected def decodeThetas(m: IndexedSeq[Vector[Double]]): Counter2[Context,Decision,Double] = {
val result = Counter2[Context,Decision,Double]
for( (vec,cI) <- m.iterator.zipWithIndex) {
result(contextIndex.get(cI),::) := decisionBroker.decode(vec)
}
result
}
// Context -> Decision -> log p(decision|context)
private def computeLogThetas(weights: DenseVector[Double]): IndexedSeq[Vector[Double]] = {
val thetas = contextBroker.mkArray[Vector[Double]]
for((dIs,cI) <- featureGrid.zipWithIndex) {
thetas(cI) = decisionBroker.mkDenseVector(Double.NegativeInfinity)
for((dI,features) <- dIs.iterator) {
val score = sumWeights(features,weights)
thetas(cI)(dI) = score
}
}
logNormalizeRows(thetas)
}
private def logNormalizeRows(thetas: IndexedSeq[Vector[Double]]) = {
for( vec <- thetas) {
vec -= softmax(vec)
}
thetas
}
// basically just a dot product
protected def sumWeights(indices: Array[Int], weights: DenseVector[Double]) = {
var i = 0
var sum = 0.0
while(i < indices.length) {
val f = indices(i)
sum += weights(f)
i += 1
}
sum
}
override def calculate(weights: DenseVector[Double]) = {
val encodedThetas = computeLogThetas(weights)
val (marginalLogProb,eCounts) = expectedCounts(encodedThetas)
val encodedTotals = eCounts.map(v => softmax(v))
val (expCompleteLogProb,grad) = computeGradient(weights, encodedThetas, eCounts, encodedTotals)
(-marginalLogProb,grad)
}
override def valueAt(weights: DenseVector[Double]) = {
val encodedThetas = computeLogThetas(weights)
val (marginalLogProb,eCounts) = expectedCounts(encodedThetas)
-marginalLogProb
}
// computes just the value
protected def computeValue(featureWeights: Vector[Double], logThetas: IndexedSeq[Vector[Double]], eCounts: IndexedSeq[Vector[Double]], eTotals: IndexedSeq[Double]) = {
var logProb = 0.0
for( (vec,c) <- eCounts.zipWithIndex) {
val cTheta = logThetas(c)
logProb += cTheta dot vec
}
-logProb
}
// computes expected complete log Likelihood and gradient
protected def computeGradient(featureWeights: Vector[Double], logThetas: IndexedSeq[Vector[Double]], eCounts: IndexedSeq[Vector[Double]], eTotals: IndexedSeq[Double]): (Double,DenseVector[Double]) = {
// gradient is \sum_{d,c} e(d,c) * (f(d,c) - \sum_{d'} exp(logTheta(c,d')) f(d',c))
// = \sum_{d,c} (e(d,c) - e(*,c) exp(logTheta(d,c))) f(d,c)
// = \sum_{d,c} margin(d,c) * f(d,c)
//
// e(*,c) = \sum_d e(d,c) == eCounts(c).total
val (grad: DenseVector[Double],prob: Double) = eCounts.zipWithIndex.par.view.map { case (vec,c) =>
val cTheta = logThetas(c)
var logProb = 0.0
val logTotal = math.log(eTotals(c))
val featureGrad = featureEncoder.mkDenseVector(0.0)
vec match {
case vec: SparseVector[Double] =>
var i = 0
while(i < vec.activeSize) {
val d = vec.indexAt(i)
val e = vec.valueAt(i)
val lT = cTheta(d)
logProb += e * lT
val margin = e - math.exp(logTotal + lT)
var j = 0
val grid = featureGrid(c)(d)
if(grid != null)
while(j < grid.size) {
val f = grid(j)
featureGrad(f) += margin
j += 1
}
i += 1
}
case _ =>
for((d,e) <- vec.activeIterator) {
val lT = cTheta(d)
logProb += e * lT
val margin = e - math.exp(logTotal + lT)
val grid = featureGrid(c)(d)
if(grid != null)
for( f <- grid)
featureGrad(f) += margin
}
}
(featureGrad,logProb)
}.fold((featureEncoder.mkDenseVector(0.0),0.0)) { (gradObj1,gradObj2) =>
gradObj2._1 += gradObj1._1
(gradObj2._1, gradObj1._2 + gradObj2._2)
}
val realProb = - prob
val finalGrad = grad * -1.0
(realProb,finalGrad)
}
class mStepObjective(encodedCounts: IndexedSeq[Vector[Double]]) extends DiffFunction[DenseVector[Double]] {
val encodedTotals = encodedCounts.map(v => softmax(v))
override def calculate(weights: DenseVector[Double]) = {
val logThetas = computeLogThetas(weights)
computeGradient(weights,logThetas,encodedCounts,encodedTotals)
}
override def valueAt(weights: DenseVector[Double]) = {
val logThetas = computeLogThetas(weights)
computeValue(weights,logThetas,encodedCounts,encodedTotals)
}
}
final case class State(encodedWeights: DenseVector[Double], marginalLikelihood: Double) {
private[MaxEntObjectiveFunction] lazy val encodedLogThetas =computeLogThetas(encodedWeights)
lazy val logThetas = decodeThetas(encodedLogThetas)
lazy val weights = featureEncoder.decode(encodedWeights)
}
/*
def emIterations(initialWeights: Counter[Feature,Double] = defaultInitWeights,
maxMStepIterations: Int=90,
optParams: FirstOrderMinimizer.OptParams): Iterator[State] = {
val log = Log.globalLog
val weightsIterator = Iterator.iterate(State(featureEncoder.encodeDense(initialWeights),Double.NegativeInfinity)) { state =>
val (marginalLogProb,eCounts) = expectedCounts(state.encodedLogThetas)
val obj = new mStepObjective(eCounts)
val optimizer = optParams.minimizer(obj)
val newWeights = optimizer.minimize(obj, state.encodedWeights)
val nrm = norm(state.encodedWeights - newWeights,2) / newWeights.size
State(newWeights,marginalLogProb)
}
weightsIterator drop 1 // initial iteration is crap
}
*/
}
trait EasyMaxEnt { maxent: MaxEntObjectiveFunction =>
protected def decisionsForContext(c: Context): Iterator[Decision]
protected def allContexts: Iterator[Context]
val contextIndex: Index[Context] = Index(allContexts)
val (decisionIndex,indexedDecisionsForContext) = {
val decisionIndex = Index[Decision]
val indexedDecisionsForContext = contextBroker.mkArray[IndexedSeq[Int]]
for( (c,cI) <- contextIndex.pairs) {
indexedDecisionsForContext(cI) = scala.util.Sorting.stableSort(decisionsForContext(c).map(decisionIndex.index _).toSeq)
}
(decisionIndex,indexedDecisionsForContext:IndexedSeq[IndexedSeq[Int]])
}
/** Should compute marginal likelihood and expected counts for the data */
protected def expectedCounts(logThetas: Counter2[Context,Decision,Double]):(Double,Counter2[Context,Decision,Double])
protected def expectedCounts(encodedThetas: IndexedSeq[Vector[Double]]):(Double,IndexedSeq[Vector[Double]]) = {
val logThetas = decodeThetas(encodedThetas)
val (marginalLogProb,eCounts) = expectedCounts(logThetas)
(marginalLogProb,encodeCounts(eCounts))
}
private def encodeCounts(eCounts: Counter2[Context,Decision,Double]): Array[Vector[Double]] = {
val encCounts = contextBroker.mkArray[Vector[Double]]
for( c <- eCounts.keysIterator.map(_._1).toSet[Context]) {
val ctr = eCounts(c,::)
val cI = contextIndex(c)
val encCtr = decisionBroker.encode(ctr)
encCounts(cI) = encCtr
}
encCounts
}
class mStepObjective(eCounts: Counter2[Context,Decision,Double]) extends maxent.mStepObjective(encodeCounts(eCounts))
}
| tjhunter/scalanlp-core | learn/src/main/scala/breeze/maxent/MaxEntObjectiveFunction.scala | Scala | apache-2.0 | 9,299 |
import sbt._
import Keys._
import org.scalatra.sbt._
import org.scalatra.sbt.PluginKeys._
import com.mojolly.scalate.ScalatePlugin._
import sbtassembly.Plugin._
import AssemblyKeys._
import ScalateKeys._
object DruidiaBuild extends Build {
val ScalatraVersion = "2.2.0"
lazy val project = Project ("druidia", file("."),
settings = Defaults.defaultSettings ++ ScalatraPlugin.scalatraWithJRebel ++ scalateSettings ++ assemblySettings ++ Seq(
resolvers += Classpaths.typesafeReleases,
scalateTemplateConfig in Compile <<= (sourceDirectory in Compile){ base =>
Seq(
TemplateConfig(
base / "webapp" / "WEB-INF" / "templates",
Seq.empty, /* default imports should be added here */
Seq(
Binding("context", "_root_.org.scalatra.scalate.ScalatraRenderContext", importMembers = true, isImplicit = true)
), /* add extra bindings here */
Some("templates")
)
)
}
)
) settings(
mergeStrategy in assembly <<= (mergeStrategy in assembly) { (old) =>
{
case "about.html" => MergeStrategy.discard
case x => old(x)
}
}
)
}
| modcloth-labs/druidia | project/build.scala | Scala | mit | 1,201 |
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.fixture
import scala.concurrent.{Future, ExecutionContext}
import org.scalatest._
@Finders(Array("org.scalatest.finders.FunSpecFinder"))
trait AsyncFunSpecLike extends FunSpecRegistration with AsyncTests with org.scalatest.OneInstancePerTest { thisSuite =>
implicit def executionContext: ExecutionContext
override private[scalatest] def transformToOutcome(testFun: FixtureParam => Registration): FixtureParam => AsyncOutcome =
(fixture: FixtureParam) => {
val futureUnit = testFun(fixture)
FutureOutcome(
futureUnit.map(u => Succeeded).recover {
case ex: exceptions.TestCanceledException => Canceled(ex)
case _: exceptions.TestPendingException => Pending
case tfe: exceptions.TestFailedException => Failed(tfe)
case ex: Throwable if !Suite.anExceptionThatShouldCauseAnAbort(ex) => Failed(ex)
}
)
}
private final val engine: FixtureEngine[FixtureParam] = getEngine
import engine._
protected override def runTest(testName: String, args: Args): Status = {
if (args.runTestInNewInstance) {
// In initial instance, so create a new test-specific instance for this test and invoke run on it.
val oneInstance = newInstance
oneInstance.run(Some(testName), args)
}
else {
// Therefore, in test-specific instance, so run the test.
def invokeWithAsyncFixture(theTest: TestLeaf): AsyncOutcome = {
val theConfigMap = args.configMap
val testData = testDataFor(testName, theConfigMap)
FutureOutcome(
withAsyncFixture(
new OneArgAsyncTest {
val name = testData.name
def apply(fixture: FixtureParam): Future[Outcome] =
theTest.testFun(fixture).toFutureOutcome
val configMap = testData.configMap
val scopes = testData.scopes
val text = testData.text
val tags = testData.tags
}
)
)
}
runTestImpl(thisSuite, testName, args, true, invokeWithAsyncFixture)
}
}
} | SRGOM/scalatest | scalatest/src/main/scala/org/scalatest/fixture/AsyncFunSpecLike.scala | Scala | apache-2.0 | 2,701 |
/*******************************************************************************
Copyright (c) 2013, KAIST.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
***************************************************************************** */
package kr.ac.kaist.jsaf.nodes_util
import java.lang.Double
import java.lang.String
import java.math.BigInteger
import java.io.Writer
import java.util.Collections
import java.util.List
import java.util.Map
import java.util.ArrayList
import java.util.LinkedList
import kr.ac.kaist.jsaf.nodes_util._
import kr.ac.kaist.jsaf.useful._
import edu.rice.cs.plt.tuple.Option
class SpanInfo(span: Span) extends UIDObject {
def getSpan() = span
}
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/nodes_util/SpanInfo.scala | Scala | bsd-3-clause | 772 |
package com.onion.view
import com.onion.model._
import com.onion.mongo.DB.UserDao
import spray.json.DefaultJsonProtocol
import sprest.util.enum.{EnumCompanion, Enum}
import scala.concurrent.Future
import com.onion.util.FutureUtil._
import com.onion.util.OptionUtil._
import scala.concurrent.ExecutionContext.Implicits.global
/**
* Created by famo on 1/30/15.
*/
object ViewObject {
type UserDetail = User
case class UserAbstraction(id: Option[String], name: Option[String], title: Option[String], icon: Option[String], rating: Option[Int])
object UserAbstraction extends DefaultJsonProtocol {
implicit val format = jsonFormat5(apply)
}
case class MeetingAbstraction(id: Option[String], subject: Option[String], target: Option[String], desc: Option[String], price: Option[Double],
createTime: Option[Long], updateTime: Option[Long], seller: Option[UserAbstraction])
object MeetingAbstraction extends DefaultJsonProtocol {
implicit val format = jsonFormat8(apply)
def fromModel(meeting: Meeting, user: User): MeetingAbstraction = {
MeetingAbstraction(meeting.id, meeting.subject, meeting.target, meeting.description, meeting.price,
meeting.createTime, meeting.updateTime,
UserAbstraction(user.id, user.name, user.jobTitle, user.photo, user.score))
}
}
case class MeetingAbsResponse(meetings: Option[Iterable[MeetingAbstraction]])
object MeetingAbsResponse extends DefaultJsonProtocol {
implicit val format = jsonFormat1(apply)
def fromModels(meetingsFuture: Future[Iterable[Meeting]]): Future[MeetingAbsResponse] = {
meetingsFuture.scanIterable[MeetingAbstraction]((meeting: Meeting) => {
UserDao.findById(meeting.userId).make[MeetingAbstraction](user => MeetingAbstraction.fromModel(meeting, user)).get(null)
}).map(MeetingAbsResponse(_))
//
//
// val result = for (meetings <- meetingsFuture)
// yield {
// val listOfFuture = for (meeting <- meetings)
// yield for (userOpt <- UserDao.findById(meeting.userId))
// yield for (user <- userOpt)
// yield MeetingAbstraction.fromModel(meeting, user)
// Future.sequence(listOfFuture).map(_.filter(_.isDefined).map(_.get))
// }
//
// result.flatMap(_.map(MeetingAbsResponse(_)))
}
}
case class CommentDetail(id: Option[String], rating: Option[Int], content: Option[String], user: Option[UserAbstraction]) {
def toComment: Comment = Comment(
id,
rating,
content,
user.id
)
}
object CommentDetail extends DefaultJsonProtocol {
implicit val format = jsonFormat4(apply)
def fromModel(comment: Comment, user: User): CommentDetail = {
CommentDetail(comment.id, comment.rating, comment.content, UserAbstraction(user.id, user.name, user.jobTitle, user.photo, user.score))
}
}
case class MeetingDetail(id: Option[String], cityId: Option[String], subject: Option[String], target: Option[String],
description: Option[String], price: Option[Double], createTime: Option[Long], updateTime: Option[Long],
seller: Option[UserAbstraction], selection: Option[List[Selection]], comments: Option[List[CommentDetail]]) {
def toMeeting: Meeting =
Meeting(
id,
cityId,
seller.id,
subject,
description,
target,
price,
selection,
comments.getOrElse(List()).map(_.toComment),
createTime,
updateTime,
false
)
}
object MeetingDetail extends DefaultJsonProtocol {
implicit val format = jsonFormat11(apply)
def fromModels(meeting: Meeting, seller: User, comments: List[CommentDetail]) = {
MeetingDetail(meeting.id, meeting.cityId, meeting.subject, meeting.target, meeting.description, meeting.price, meeting.createTime, meeting.updateTime,
UserAbstraction(seller.id, seller.name, seller.jobTitle, seller.photo, seller.score), meeting.selection, comments)
}
}
case class MeetingResponse(meeting: Option[MeetingDetail])
object MeetingResponse extends DefaultJsonProtocol {
implicit val format = jsonFormat1(apply)
def fromModels(meetingFuture: Future[Option[Meeting]]) = {
meetingFuture
.to(meeting => {
UserDao.findById(meeting.userId)
.to(seller => {
meeting.comments.get.map(comment => {
UserDao.findById(comment.userId).make(user => {
CommentDetail.fromModel(comment, user)
})
}).make[MeetingDetail](comments => {
MeetingDetail.fromModels(meeting, seller, comments.asInstanceOf[List[CommentDetail]])
})
})
}).map(MeetingResponse(_))
}
}
case class PutMeeting(meeting: Option[MeetingDetail])
object PutMeeting extends DefaultJsonProtocol {
implicit val format = jsonFormat1(apply)
}
sealed abstract class ResponseCode(val id: String) extends Enum[ResponseCode](id)
object ResponseCode extends EnumCompanion[ResponseCode] {
case object OK200 extends ResponseCode("200")
case object ERROR400 extends ResponseCode("400")
case object ERROR500 extends ResponseCode("500")
register(
OK200,
ERROR400,
ERROR500
)
}
case class PostResponse(code: ResponseCode, msg: String)
object PostResponse extends DefaultJsonProtocol {
implicit val format = jsonFormat2(apply)
}
case class PostMeeting(meeting: Option[Meeting])
object PostMeeting extends DefaultJsonProtocol {
implicit val format = jsonFormat1(apply)
}
case class PutBook(meetingId: String, selectionId : String, memo: String)
object PutBook extends DefaultJsonProtocol {
implicit val format = jsonFormat3(apply)
}
}
| jasoncao/onion-ring | src/main/scala/com/onion/view/ViewObject.scala | Scala | apache-2.0 | 5,915 |
import reflect.ClassTag
class Co[+S]
object Co {
def empty[X: ClassTag]: Co[X] = ???
}
class Contra[-S]
object Contra {
def empty[X: ClassTag]: Contra[X] = ???
}
class Foo[+FT](x: FT) {
def fooArray: Foo[Array[String]] = new Foo(Array.empty)
val y1: Array[String] = Array.empty
def fooCo: Foo[Co[String]] = new Foo(Co.empty)
val y2: Co[String] = Co.empty
def fooContra: Foo[Contra[String]] = new Foo(Contra.empty)
val y3: Contra[String] = Contra.empty
}
| som-snytt/dotty | tests/pos/i6127.scala | Scala | apache-2.0 | 471 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.box.retriever
import org.scalatest.Matchers._
import org.scalatest.WordSpec
import uk.gov.hmrc.ct.box._
class BoxRetrieverSpec extends WordSpec {
val fakeBox1Validation = CtValidation(Some("FakeBox1"), "fake1")
val fakeBox2Validation = CtValidation(Some("FakeBox2"), "fake2")
"Box Retriever" should {
"return validated box errors" in new BoxRetriever {
override def generateValues: Map[String, CtValue[_]] = ???
val values = Map(
"FakeBox1" -> FakeBox1(""),
"FakeBox2" -> FakeBox2("")
)
val errors = validateValues(values)
errors.size shouldBe 2
errors should contain(fakeBox1Validation)
errors should contain(fakeBox2Validation)
}
}
case class FakeBox1(value: String) extends CtBoxIdentifier("poo") with Input with CtString with ValidatableBox[BoxRetriever] {
override def validate(boxRetriever: BoxRetriever): Set[CtValidation] = {
Set(fakeBox1Validation)
}
}
case class FakeBox2(value: String) extends CtBoxIdentifier("poo") with Input with CtString with ValidatableBox[BoxRetriever] {
override def validate(boxRetriever: BoxRetriever): Set[CtValidation] = {
Set(fakeBox2Validation)
}
}
}
| liquidarmour/ct-calculations | src/test/scala/uk/gov/hmrc/ct/box/retriever/BoxRetrieverSpec.scala | Scala | apache-2.0 | 1,840 |
package com.twitter.finagle.thrift
import com.twitter.finagle.Service
import com.twitter.util.Future
import org.specs.mock.Mockito
import org.specs.SpecificationWithJUnit
class ClientIdRequiredFilterSpec extends SpecificationWithJUnit with Mockito {
"ClientIdRequiredFilter" should {
val underlying = mock[Service[String, String]]
val service = new ClientIdRequiredFilter andThen underlying
val request = "request"
val response = Future.value("response")
val clientId = ClientId("test")
"passes through when ClientId exists" in {
underlying(request) returns response
clientId.asCurrent {
val result = service(request)
result() mustEqual response()
result
}
}
"throws NoClientIdSpecifiedException when ClientId does not exist" in {
service(request)() must throwA[NoClientIdSpecifiedException]
there was no(underlying).apply(any[String])
}
}
}
| joshbedo/finagle | finagle-thrift/src/test/scala/com/twitter/finagle/thrift/ClientIdRequiredFilterSpec.scala | Scala | apache-2.0 | 940 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.codegen.calls
import org.apache.flink.table.planner.codegen.CodeGenUtils.{BINARY_STRING, qualifyMethod}
import org.apache.flink.table.planner.codegen.GenerateUtils.generateCallIfArgsNotNull
import org.apache.flink.table.planner.codegen.{CodeGeneratorContext, GeneratedExpression}
import org.apache.flink.table.types.logical.LogicalType
import java.lang.reflect.Method
import java.util.TimeZone
class MethodCallGen(method: Method) extends CallGenerator {
override def generate(
ctx: CodeGeneratorContext,
operands: Seq[GeneratedExpression],
returnType: LogicalType): GeneratedExpression = {
generateCallIfArgsNotNull(ctx, returnType, operands, !method.getReturnType.isPrimitive) {
originalTerms => {
val terms = originalTerms.zip(method.getParameterTypes).map { case (term, clazz) =>
// convert the BinaryString parameter to String if the method parameter accept String
if (clazz == classOf[String]) {
s"$term.toString()"
} else {
term
}
}
// generate method invoke code and adapt when it's a time zone related function
val call = if (terms.length + 1 == method.getParameterCount &&
method.getParameterTypes()(terms.length) == classOf[TimeZone]) {
// insert the zoneID parameters for timestamp functions
val timeZone = ctx.addReusableSessionTimeZone()
s"""
|${qualifyMethod(method)}(${terms.mkString(", ")}, $timeZone)
""".stripMargin
} else {
s"""
|${qualifyMethod(method)}(${terms.mkString(", ")})
""".stripMargin
}
// convert String to BinaryString if the return type is String
if (method.getReturnType == classOf[String]) {
s"$BINARY_STRING.fromString($call)"
} else {
call
}
}
}
}
}
| bowenli86/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/codegen/calls/MethodCallGen.scala | Scala | apache-2.0 | 2,743 |
package logic
import scalaz._
trait MonadLogic[F[_]] extends MonadPlus[F] {
private def maybe[A, B](m: Option[A], default: => B)(f: A => B): B =
m match {
case None => default
case Some(a) => f(a)
}
def split[A](m: F[A]): F[Option[(A, F[A])]]
def interleave[A](m1: F[A], m2: F[A]): F[A] =
bind(split(m1))(maybe(_, m2){ case (a, m1a) => plus(pure(a), interleave(m2, m1a)) })
def >>-[A, B](m: F[A])(f: A => F[B]): F[B] =
bind(bind(split(m))(maybe(_, empty[(A, F[A])])(pure(_)))) { case (a, m1) =>
interleave(f(a), >>-(m1)(f))
}
def ifte[A, B](t: F[A], el: F[B])(th: A => F[B]): F[B] =
bind(split(t))(maybe(_, el){ case (a, m) => plus(th(a), bind(m)(th)) })
def once[A](m: F[A]): F[A] =
bind(bind(split(m))(maybe(_, empty[(A, F[A])])(pure(_)))) { case (a, _) => pure(a) }
}
object MonadLogic extends MonadLogicInstances with MonadLogicFunctions {
@inline def apply[F[_]](implicit F: MonadLogic[F]): MonadLogic[F] = F
}
trait MonadLogicFunctions {
def reflect[F[_], A](x: Option[(A, F[A])])(implicit L: MonadLogic[F]): F[A] =
x match {
case None => L.empty
case Some((a, m)) => L.plus(L.pure(a), m)
}
def lnot[F[_], A](m: F[A])(implicit L: MonadLogic[F]): F[Unit] =
L.ifte(L.once(m), L.pure(()))(_ => L.empty)
}
trait MonadLogicInstances2 {
implicit def writerTMonadLogic[F[_], W](implicit L0: MonadLogic[F], M0: Monoid[W]): MonadLogic[WriterT[W, F, *]] = new WriterTMonadLogic[F, W] {
implicit def L: MonadLogic[F] = L0
implicit def M: Monoid[W] = M0
}
}
trait MonadLogicInstances1 extends MonadLogicInstances2 {
import scalaz.StateT._
implicit def stateTMonadLogic[F[_], S](implicit L: MonadLogic[F]): MonadLogic[StateT[S, F, *]] = new MonadLogic[StateT[S, F, *]] {
def point[A](a: => A) = stateTMonadPlus[S, F].point[A](a)
def bind[A, B](fa: StateT[S, F, A])(f: A => StateT[S, F, B]) = stateTMonadPlus[S, F].bind[A, B](fa)(f)
def empty[A] = stateTMonadPlus[S, F].empty[A]
def plus[A](a: StateT[S, F, A], b: => StateT[S, F, A]) = stateTMonadPlus[S, F].plus[A](a, b)
def split[A](sm: StateT[S, F, A]) = StateT(s =>
L.bind(L.split(sm.run(s))) {
case None => L.pure((s, None))
case Some(((s2, a), m)) => L.pure((s2, Some((a, StateT(Function.const(m))))))
})
override def interleave[A](m1: StateT[S, F, A], m2: StateT[S, F, A]): StateT[S, F, A] = StateT(s =>
L.interleave(m1.run(s), m2.run(s))
)
override def >>-[A, B](m: StateT[S, F, A])(f: A => StateT[S, F, B]): StateT[S, F, B] = StateT(s =>
L.>>-(m.run(s)){ case (s2, a) => f(a).run(s2) }
)
override def ifte[A, B](t: StateT[S, F, A], el: StateT[S, F, B])(th: A => StateT[S, F, B]): StateT[S, F, B] =
StateT(s => L.ifte(t.run(s), el.run(s)){ case (s2, a) => th(a).run(s2) })
override def once[A](m: StateT[S, F, A]): StateT[S, F, A] = StateT(s => L.once(m.run(s)))
}
}
trait MonadLogicInstances0 extends MonadLogicInstances1 {
import scalaz.Kleisli._
// MonadLogic[ReaderT[F, E, *]]
implicit def kleisliMonadLogic[F[_], E](implicit L: MonadLogic[F]): MonadLogic[Kleisli[F, E, *]] = new MonadLogic[Kleisli[F, E, *]] {
def point[A](a: => A) = kleisliMonadPlus[F, E].point[A](a)
def bind[A, B](fa: Kleisli[F, E, A])(f: A => Kleisli[F, E, B]) = kleisliMonadPlus[F, E].bind[A, B](fa)(f)
def empty[A] = kleisliMonadPlus[F, E].empty[A]
def plus[A](a: Kleisli[F, E, A], b: => Kleisli[F, E, A]) = kleisliMonadPlus[F, E].plus[A](a, b)
def split[A](rm: Kleisli[F, E, A]) =
Kleisli[F, E, Option[(A, Kleisli[F, E, A])]](e =>
L.bind(L.split(rm.run(e))) {
case None => L.pure(None)
case Some((a, m)) => L.pure(Some((a, kleisliMonadTrans.liftM(m))))
})
}
}
trait MonadLogicInstances extends MonadLogicInstances0 {
import scalaz.std.list.listInstance
implicit val listMonadLogic: MonadLogic[List] = new MonadLogic[List] {
def split[A](l: List[A]) = l match {
case Nil => pure(None)
case x::xs => pure(Some((x, xs)))
}
def point[A](a: => A) = listInstance.point(a)
def bind[A, B](fa: List[A])(f: A => List[B]) = listInstance.bind(fa)(f)
def empty[A] = listInstance.empty[A]
def plus[A](a: List[A], b: => List[A]) = listInstance.plus(a, b)
}
}
private trait WriterTMonadLogic[F[_], W] extends MonadLogic[WriterT[W, F, *]] {
implicit def L: MonadLogic[F]
implicit def M: Monoid[W]
def tell(w: W): WriterT[W, F, Unit] = WriterT(L.pure((w, ())))
def point[A](a: => A) = WriterT.writerTMonad[W, F].point[A](a)
def bind[A, B](fa: WriterT[W, F, A])(f: A => WriterT[W, F, B]) = WriterT.writerTMonad[W, F].bind[A, B](fa)(f)
def empty[A] = WriterT(L.empty[(W, A)])
def plus[A](a: WriterT[W, F, A], b: => WriterT[W, F, A]) = WriterT(L.plus(a.run, b.run))
def split[A](wm: WriterT[W, F, A]) = WriterT(
L.bind(L.split(wm.run)) {
case None => L.pure((M.zero, None))
case Some(((w, a), m)) => L.pure((w, Some((a, WriterT(m)))))
})
override def interleave[A](m1: WriterT[W, F, A], m2: WriterT[W, F, A]): WriterT[W, F, A] =
WriterT(L.interleave(m1.run, m2.run))
override def >>-[A, B](m: WriterT[W, F, A])(f: A => WriterT[W, F, B]): WriterT[W, F, B] =
WriterT(L.>>-(m.run){ case (w, a) => tell(w).flatMap(_ => f(a)).run })
override def ifte[A, B](t: WriterT[W, F, A], el: WriterT[W, F, B])(th: A => WriterT[W, F, B]): WriterT[W, F, B] =
WriterT(L.ifte(t.run, el.run){ case (w, a) => tell(w).flatMap(_ => th(a)).run })
override def once[A](m: WriterT[W, F, A]): WriterT[W, F, A] = WriterT(L.once(m.run))
}
| xuwei-k/scala-logic | src/main/scala/logic/MonadLogic.scala | Scala | mit | 5,620 |
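A quick sanity check of the List instance defined above — a minimal sketch, assuming only this file and scalaz on the classpath (the object name is illustrative):

import logic.MonadLogic

object MonadLogicListDemo {
  def main(args: Array[String]): Unit = {
    val L = MonadLogic[List]
    // split peels off the first answer together with the rest of the computation.
    println(L.split(List(1, 2, 3)))   // List(Some((1, List(2, 3))))
    println(L.split(List.empty[Int])) // List(None)
    // interleave alternates answers from the two computations (fair disjunction).
    println(L.interleave(List(1, 2, 3), List(10, 20))) // List(1, 10, 2, 20, 3)
  }
}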
/*                                                                      *\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2015, Gary Keorkunian **
** **
\*                                                                      */
package squants.market
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
/**
* @author garyKeorkunian
* @since 0.1
*
*/
class CurrencyExchangeRateSpec extends AnyFlatSpec with Matchers {
behavior of "CurrencyExchangeRate"
it should "create CurrencyExchangeRates using the default factory method" in {
val rate = CurrencyExchangeRate(USD(1), JPY(100))
rate.base should be(USD(1))
rate.counter should be(JPY(100))
}
it should "properly return a Currency Exchange Rate" in {
val rate = CurrencyExchangeRate(USD(1), JPY(100))
rate.rate should be(100)
}
it should "properly return a converted Money value" in {
val rate1 = CurrencyExchangeRate(USD(1), JPY(100))
val rate2 = CurrencyExchangeRate(USD(1), EUR(75))
// using the convert method directly
rate1.convert(JPY(100)) should be(USD(1))
rate1.convert(USD(1)) should be(JPY(100))
rate2.convert(EUR(75)) should be(USD(1))
rate2.convert(USD(1)) should be(EUR(75))
// using the * operator
rate1 * JPY(100) should be(USD(1))
rate1 * USD(1) should be(JPY(100))
rate2 * EUR(75) should be(USD(1))
rate2 * USD(1) should be(EUR(75))
// using the methods inherited from Ratio
rate1.convertToBase(JPY(100)) should be(USD(1))
rate1.convertToCounter(USD(1)) should be(JPY(100))
rate2.convertToBase(EUR(75)) should be(USD(1))
rate2.convertToCounter(USD(1)) should be(EUR(75))
}
it should "properly return a string formatted as an FX quote" in {
val rate = CurrencyExchangeRate(USD(1), JPY(100))
rate.toString should be("USD/JPY 100.0")
}
}
| typelevel/squants | shared/src/test/scala/squants/market/CurrencyExchangeRateSpec.scala | Scala | apache-2.0 | 2,169 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.io.{ObjectInput, ObjectOutput, Externalizable}
import java.sql.{Date, Timestamp}
import scala.language.postfixOps
import org.apache.spark.sql.functions._
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
class DatasetSuite extends QueryTest with SharedSQLContext {
import testImplicits._
test("toDS") {
val data = Seq(("a", 1) , ("b", 2), ("c", 3))
checkAnswer(
data.toDS(),
data: _*)
}
test("toDS with RDD") {
val ds = sparkContext.makeRDD(Seq("a", "b", "c"), 3).toDS()
checkAnswer(
ds.mapPartitions(_ => Iterator(1)),
1, 1, 1)
}
test("SPARK-12404: Datatype Helper Serializablity") {
val ds = sparkContext.parallelize((
new Timestamp(0),
new Date(0),
java.math.BigDecimal.valueOf(1),
scala.math.BigDecimal(1)) :: Nil).toDS()
ds.collect()
}
test("collect, first, and take should use encoders for serialization") {
val item = NonSerializableCaseClass("abcd")
val ds = Seq(item).toDS()
assert(ds.collect().head == item)
assert(ds.collectAsList().get(0) == item)
assert(ds.first() == item)
assert(ds.take(1).head == item)
assert(ds.takeAsList(1).get(0) == item)
}
test("coalesce, repartition") {
val data = (1 to 100).map(i => ClassData(i.toString, i))
val ds = data.toDS()
assert(ds.repartition(10).rdd.partitions.length == 10)
checkAnswer(
ds.repartition(10),
data: _*)
assert(ds.coalesce(1).rdd.partitions.length == 1)
checkAnswer(
ds.coalesce(1),
data: _*)
}
test("as tuple") {
val data = Seq(("a", 1), ("b", 2)).toDF("a", "b")
checkAnswer(
data.as[(String, Int)],
("a", 1), ("b", 2))
}
test("as case class / collect") {
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDF("a", "b").as[ClassData]
checkAnswer(
ds,
ClassData("a", 1), ClassData("b", 2), ClassData("c", 3))
assert(ds.collect().head == ClassData("a", 1))
}
test("as case class - reordered fields by name") {
val ds = Seq((1, "a"), (2, "b"), (3, "c")).toDF("b", "a").as[ClassData]
assert(ds.collect() === Array(ClassData("a", 1), ClassData("b", 2), ClassData("c", 3)))
}
test("as case class - take") {
val ds = Seq((1, "a"), (2, "b"), (3, "c")).toDF("b", "a").as[ClassData]
assert(ds.take(2) === Array(ClassData("a", 1), ClassData("b", 2)))
}
test("map") {
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.map(v => (v._1, v._2 + 1)),
("a", 2), ("b", 3), ("c", 4))
}
test("map and group by with class data") {
// We inject a group by here to make sure this test case is future proof
// when we implement better pipelining and local execution mode.
val ds: Dataset[(ClassData, Long)] = Seq(ClassData("one", 1), ClassData("two", 2)).toDS()
.map(c => ClassData(c.a, c.b + 1))
.groupBy(p => p).count()
checkAnswer(
ds,
(ClassData("one", 2), 1L), (ClassData("two", 3), 1L))
}
test("select") {
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.select(expr("_2 + 1").as[Int]),
2, 3, 4)
}
test("select 2") {
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.select(
expr("_1").as[String],
expr("_2").as[Int]) : Dataset[(String, Int)],
("a", 1), ("b", 2), ("c", 3))
}
test("select 2, primitive and tuple") {
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.select(
expr("_1").as[String],
expr("struct(_2, _2)").as[(Int, Int)]),
("a", (1, 1)), ("b", (2, 2)), ("c", (3, 3)))
}
test("select 2, primitive and class") {
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.select(
expr("_1").as[String],
expr("named_struct('a', _1, 'b', _2)").as[ClassData]),
("a", ClassData("a", 1)), ("b", ClassData("b", 2)), ("c", ClassData("c", 3)))
}
test("select 2, primitive and class, fields reordered") {
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
checkDecoding(
ds.select(
expr("_1").as[String],
expr("named_struct('b', _2, 'a', _1)").as[ClassData]),
("a", ClassData("a", 1)), ("b", ClassData("b", 2)), ("c", ClassData("c", 3)))
}
test("filter") {
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.filter(_._1 == "b"),
("b", 2))
}
test("foreach") {
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
val acc = sparkContext.accumulator(0)
ds.foreach(v => acc += v._2)
assert(acc.value == 6)
}
test("foreachPartition") {
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
val acc = sparkContext.accumulator(0)
ds.foreachPartition(_.foreach(v => acc += v._2))
assert(acc.value == 6)
}
test("reduce") {
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
assert(ds.reduce((a, b) => ("sum", a._2 + b._2)) == ("sum", 6))
}
test("joinWith, flat schema") {
val ds1 = Seq(1, 2, 3).toDS().as("a")
val ds2 = Seq(1, 2).toDS().as("b")
checkAnswer(
ds1.joinWith(ds2, $"a.value" === $"b.value", "inner"),
(1, 1), (2, 2))
}
test("joinWith, expression condition, outer join") {
val nullInteger = null.asInstanceOf[Integer]
val nullString = null.asInstanceOf[String]
val ds1 = Seq(ClassNullableData("a", 1),
ClassNullableData("c", 3)).toDS()
val ds2 = Seq(("a", new Integer(1)),
("b", new Integer(2))).toDS()
checkAnswer(
ds1.joinWith(ds2, $"_1" === $"a", "outer"),
(ClassNullableData("a", 1), ("a", new Integer(1))),
(ClassNullableData("c", 3), (nullString, nullInteger)),
(ClassNullableData(nullString, nullInteger), ("b", new Integer(2))))
}
test("joinWith tuple with primitive, expression") {
val ds1 = Seq(1, 1, 2).toDS()
val ds2 = Seq(("a", 1), ("b", 2)).toDS()
checkAnswer(
ds1.joinWith(ds2, $"value" === $"_2"),
(1, ("a", 1)), (1, ("a", 1)), (2, ("b", 2)))
}
test("joinWith class with primitive, toDF") {
val ds1 = Seq(1, 1, 2).toDS()
val ds2 = Seq(ClassData("a", 1), ClassData("b", 2)).toDS()
checkAnswer(
ds1.joinWith(ds2, $"value" === $"b").toDF().select($"_1", $"_2.a", $"_2.b"),
Row(1, "a", 1) :: Row(1, "a", 1) :: Row(2, "b", 2) :: Nil)
}
test("multi-level joinWith") {
val ds1 = Seq(("a", 1), ("b", 2)).toDS().as("a")
val ds2 = Seq(("a", 1), ("b", 2)).toDS().as("b")
val ds3 = Seq(("a", 1), ("b", 2)).toDS().as("c")
checkAnswer(
ds1.joinWith(ds2, $"a._2" === $"b._2").as("ab").joinWith(ds3, $"ab._1._2" === $"c._2"),
((("a", 1), ("a", 1)), ("a", 1)),
((("b", 2), ("b", 2)), ("b", 2)))
}
test("groupBy function, keys") {
val ds = Seq(("a", 1), ("b", 1)).toDS()
val grouped = ds.groupBy(v => (1, v._2))
checkAnswer(
grouped.keys,
(1, 1))
}
test("groupBy function, map") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
val grouped = ds.groupBy(v => (v._1, "word"))
val agged = grouped.mapGroups { case (g, iter) => (g._1, iter.map(_._2).sum) }
checkAnswer(
agged,
("a", 30), ("b", 3), ("c", 1))
}
test("groupBy function, flatMap") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
val grouped = ds.groupBy(v => (v._1, "word"))
val agged = grouped.flatMapGroups { case (g, iter) =>
Iterator(g._1, iter.map(_._2).sum.toString)
}
checkAnswer(
agged,
"a", "30", "b", "3", "c", "1")
}
test("groupBy function, reduce") {
val ds = Seq("abc", "xyz", "hello").toDS()
val agged = ds.groupBy(_.length).reduce(_ + _)
checkAnswer(
agged,
3 -> "abcxyz", 5 -> "hello")
}
test("groupBy single field class, count") {
val ds = Seq("abc", "xyz", "hello").toDS()
val count = ds.groupBy(s => Tuple1(s.length)).count()
checkAnswer(
count,
(Tuple1(3), 2L), (Tuple1(5), 1L)
)
}
test("groupBy columns, map") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
val grouped = ds.groupBy($"_1")
val agged = grouped.mapGroups { case (g, iter) => (g.getString(0), iter.map(_._2).sum) }
checkAnswer(
agged,
("a", 30), ("b", 3), ("c", 1))
}
test("groupBy columns, count") {
val ds = Seq("a" -> 1, "b" -> 1, "a" -> 2).toDS()
val count = ds.groupBy($"_1").count()
checkAnswer(
count,
(Row("a"), 2L), (Row("b"), 1L))
}
test("groupBy columns asKey, map") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
val grouped = ds.groupBy($"_1").keyAs[String]
val agged = grouped.mapGroups { case (g, iter) => (g, iter.map(_._2).sum) }
checkAnswer(
agged,
("a", 30), ("b", 3), ("c", 1))
}
test("groupBy columns asKey tuple, map") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
val grouped = ds.groupBy($"_1", lit(1)).keyAs[(String, Int)]
val agged = grouped.mapGroups { case (g, iter) => (g, iter.map(_._2).sum) }
checkAnswer(
agged,
(("a", 1), 30), (("b", 1), 3), (("c", 1), 1))
}
test("groupBy columns asKey class, map") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
val grouped = ds.groupBy($"_1".as("a"), lit(1).as("b")).keyAs[ClassData]
val agged = grouped.mapGroups { case (g, iter) => (g, iter.map(_._2).sum) }
checkAnswer(
agged,
(ClassData("a", 1), 30), (ClassData("b", 1), 3), (ClassData("c", 1), 1))
}
test("typed aggregation: expr") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
checkAnswer(
ds.groupBy(_._1).agg(sum("_2").as[Long]),
("a", 30L), ("b", 3L), ("c", 1L))
}
test("typed aggregation: expr, expr") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
checkAnswer(
ds.groupBy(_._1).agg(sum("_2").as[Long], sum($"_2" + 1).as[Long]),
("a", 30L, 32L), ("b", 3L, 5L), ("c", 1L, 2L))
}
test("typed aggregation: expr, expr, expr") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
checkAnswer(
ds.groupBy(_._1).agg(sum("_2").as[Long], sum($"_2" + 1).as[Long], count("*")),
("a", 30L, 32L, 2L), ("b", 3L, 5L, 2L), ("c", 1L, 2L, 1L))
}
test("typed aggregation: expr, expr, expr, expr") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
checkAnswer(
ds.groupBy(_._1).agg(
sum("_2").as[Long],
sum($"_2" + 1).as[Long],
count("*").as[Long],
avg("_2").as[Double]),
("a", 30L, 32L, 2L, 15.0), ("b", 3L, 5L, 2L, 1.5), ("c", 1L, 2L, 1L, 1.0))
}
test("cogroup") {
val ds1 = Seq(1 -> "a", 3 -> "abc", 5 -> "hello", 3 -> "foo").toDS()
val ds2 = Seq(2 -> "q", 3 -> "w", 5 -> "e", 5 -> "r").toDS()
val cogrouped = ds1.groupBy(_._1).cogroup(ds2.groupBy(_._1)) { case (key, data1, data2) =>
Iterator(key -> (data1.map(_._2).mkString + "#" + data2.map(_._2).mkString))
}
checkAnswer(
cogrouped,
1 -> "a#", 2 -> "#q", 3 -> "abcfoo#w", 5 -> "hello#er")
}
test("cogroup with complex data") {
val ds1 = Seq(1 -> ClassData("a", 1), 2 -> ClassData("b", 2)).toDS()
val ds2 = Seq(2 -> ClassData("c", 3), 3 -> ClassData("d", 4)).toDS()
val cogrouped = ds1.groupBy(_._1).cogroup(ds2.groupBy(_._1)) { case (key, data1, data2) =>
Iterator(key -> (data1.map(_._2.a).mkString + data2.map(_._2.a).mkString))
}
checkAnswer(
cogrouped,
1 -> "a", 2 -> "bc", 3 -> "d")
}
test("sample with replacement") {
val n = 100
val data = sparkContext.parallelize(1 to n, 2).toDS()
checkAnswer(
data.sample(withReplacement = true, 0.05, seed = 13),
5, 10, 52, 73)
}
test("sample without replacement") {
val n = 100
val data = sparkContext.parallelize(1 to n, 2).toDS()
checkAnswer(
data.sample(withReplacement = false, 0.05, seed = 13),
3, 17, 27, 58, 62)
}
test("SPARK-11436: we should rebind right encoder when join 2 datasets") {
val ds1 = Seq("1", "2").toDS().as("a")
val ds2 = Seq(2, 3).toDS().as("b")
val joined = ds1.joinWith(ds2, $"a.value" === $"b.value")
checkAnswer(joined, ("2", 2))
}
test("self join") {
val ds = Seq("1", "2").toDS().as("a")
val joined = ds.joinWith(ds, lit(true))
checkAnswer(joined, ("1", "1"), ("1", "2"), ("2", "1"), ("2", "2"))
}
test("toString") {
val ds = Seq((1, 2)).toDS()
assert(ds.toString == "[_1: int, _2: int]")
}
test("showString: Kryo encoder") {
implicit val kryoEncoder = Encoders.kryo[KryoData]
val ds = Seq(KryoData(1), KryoData(2)).toDS()
val expectedAnswer = """+-----------+
|| value|
|+-----------+
||KryoData(1)|
||KryoData(2)|
|+-----------+
|""".stripMargin
assert(ds.showString(10) === expectedAnswer)
}
test("Kryo encoder") {
implicit val kryoEncoder = Encoders.kryo[KryoData]
val ds = Seq(KryoData(1), KryoData(2)).toDS()
assert(ds.groupBy(p => p).count().collect().toSeq ==
Seq((KryoData(1), 1L), (KryoData(2), 1L)))
}
test("Kryo encoder self join") {
implicit val kryoEncoder = Encoders.kryo[KryoData]
val ds = Seq(KryoData(1), KryoData(2)).toDS()
assert(ds.joinWith(ds, lit(true)).collect().toSet ==
Set(
(KryoData(1), KryoData(1)),
(KryoData(1), KryoData(2)),
(KryoData(2), KryoData(1)),
(KryoData(2), KryoData(2))))
}
test("Java encoder") {
implicit val kryoEncoder = Encoders.javaSerialization[JavaData]
val ds = Seq(JavaData(1), JavaData(2)).toDS()
assert(ds.groupBy(p => p).count().collect().toSeq ==
Seq((JavaData(1), 1L), (JavaData(2), 1L)))
}
test("Java encoder self join") {
implicit val kryoEncoder = Encoders.javaSerialization[JavaData]
val ds = Seq(JavaData(1), JavaData(2)).toDS()
assert(ds.joinWith(ds, lit(true)).collect().toSet ==
Set(
(JavaData(1), JavaData(1)),
(JavaData(1), JavaData(2)),
(JavaData(2), JavaData(1)),
(JavaData(2), JavaData(2))))
}
test("SPARK-11894: Incorrect results are returned when using null") {
val nullInt = null.asInstanceOf[java.lang.Integer]
val ds1 = Seq((nullInt, "1"), (new java.lang.Integer(22), "2")).toDS()
val ds2 = Seq((nullInt, "1"), (new java.lang.Integer(22), "2")).toDS()
checkAnswer(
ds1.joinWith(ds2, lit(true)),
((nullInt, "1"), (nullInt, "1")),
((new java.lang.Integer(22), "2"), (nullInt, "1")),
((nullInt, "1"), (new java.lang.Integer(22), "2")),
((new java.lang.Integer(22), "2"), (new java.lang.Integer(22), "2")))
}
test("change encoder with compatible schema") {
val ds = Seq(2 -> 2.toByte, 3 -> 3.toByte).toDF("a", "b").as[ClassData]
assert(ds.collect().toSeq == Seq(ClassData("2", 2), ClassData("3", 3)))
}
test("verify mismatching field names fail with a good error") {
val ds = Seq(ClassData("a", 1)).toDS()
val e = intercept[AnalysisException] {
ds.as[ClassData2].collect()
}
assert(e.getMessage.contains("cannot resolve 'c' given input columns: [a, b]"), e.getMessage)
}
test("runtime nullability check") {
val schema = StructType(Seq(
StructField("f", StructType(Seq(
StructField("a", StringType, nullable = true),
StructField("b", IntegerType, nullable = false)
)), nullable = true)
))
def buildDataset(rows: Row*): Dataset[NestedStruct] = {
val rowRDD = sqlContext.sparkContext.parallelize(rows)
sqlContext.createDataFrame(rowRDD, schema).as[NestedStruct]
}
checkAnswer(
buildDataset(Row(Row("hello", 1))),
NestedStruct(ClassData("hello", 1))
)
// Shouldn't throw runtime exception when parent object (`ClassData`) is null
assert(buildDataset(Row(null)).collect() === Array(NestedStruct(null)))
val message = intercept[RuntimeException] {
buildDataset(Row(Row("hello", null))).collect()
}.getMessage
assert(message.contains("Null value appeared in non-nullable field"))
}
test("SPARK-12478: top level null field") {
val ds0 = Seq(NestedStruct(null)).toDS()
checkAnswer(ds0, NestedStruct(null))
checkAnswer(ds0.toDF(), Row(null))
val ds1 = Seq(DeepNestedStruct(NestedStruct(null))).toDS()
checkAnswer(ds1, DeepNestedStruct(NestedStruct(null)))
checkAnswer(ds1.toDF(), Row(Row(null)))
}
}
case class ClassData(a: String, b: Int)
case class ClassData2(c: String, d: Int)
case class ClassNullableData(a: String, b: Integer)
case class NestedStruct(f: ClassData)
case class DeepNestedStruct(f: NestedStruct)
/**
* A class used to test serialization using encoders. This class throws exceptions when using
* Java serialization -- so the only way it can be "serialized" is through our encoders.
*/
case class NonSerializableCaseClass(value: String) extends Externalizable {
override def readExternal(in: ObjectInput): Unit = {
throw new UnsupportedOperationException
}
override def writeExternal(out: ObjectOutput): Unit = {
throw new UnsupportedOperationException
}
}
/** Used to test Kryo encoder. */
class KryoData(val a: Int) {
override def equals(other: Any): Boolean = {
a == other.asInstanceOf[KryoData].a
}
override def hashCode: Int = a
override def toString: String = s"KryoData($a)"
}
object KryoData {
def apply(a: Int): KryoData = new KryoData(a)
}
/** Used to test Java encoder. */
class JavaData(val a: Int) extends Serializable {
override def equals(other: Any): Boolean = {
a == other.asInstanceOf[JavaData].a
}
override def hashCode: Int = a
override def toString: String = s"JavaData($a)"
}
object JavaData {
def apply(a: Int): JavaData = new JavaData(a)
}
| chenc10/Spark-PAF | sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala | Scala | apache-2.0 | 18,953 |
package org.flowpaint.pixelprocessors
import _root_.org.flowpaint.property.Data
import _root_.scala.collection.Map
import org.flowpaint.pixelprocessor.PixelProcessor
import org.flowpaint.util.DataSample
/**
*
*
* @author Hans Haggstrom
*/
class Noise2D extends PixelProcessor(
"", "",
"""
float noiseX$id$ = $getScaleOffsetFloat noiseX, 0f$;
float noiseY$id$ = $getScaleOffsetFloat noiseY, 0f$;
float result$id$ = NoiseUtils.noise2( noiseX$id$, noiseY$id$ ) * 0.5f + 0.5f;
$setScaleOffsetFloat result$ result$id$;
""") | zzorn/flowpaint | src/main/scala/org/flowpaint/pixelprocessors/Noise2D.scala | Scala | gpl-2.0 | 565 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.descriptors
import org.apache.flink.table.api.ValidationException
import org.apache.flink.table.descriptors.DescriptorProperties.toJava
import org.apache.flink.table.descriptors.FunctionDescriptorValidator.FROM
import scala.collection.JavaConverters._
/**
* Validator for [[FunctionDescriptor]].
*/
class FunctionDescriptorValidator extends DescriptorValidator {
override def validate(properties: DescriptorProperties): Unit = {
val classValidation = (_: String) => {
new ClassInstanceValidator().validate(properties)
}
// check for 'from'
if (properties.containsKey(FROM)) {
properties.validateEnum(
FROM,
isOptional = false,
Map(
FunctionDescriptorValidator.FROM_VALUE_CLASS -> toJava(classValidation)
).asJava
)
} else {
throw new ValidationException("Could not find 'from' property for function.")
}
}
}
object FunctionDescriptorValidator {
val FROM = "from"
val FROM_VALUE_CLASS = "class"
}
| zhangminglei/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/FunctionDescriptorValidator.scala | Scala | apache-2.0 | 1,844 |
package views.html.bootstrap3
import play.twirl.api._
import play.twirl.api.TemplateMagic._
import play.api.templates.PlayMagic._
import models._
import controllers._
import java.lang._
import java.util._
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import play.api.i18n._
import play.core.j.PlayMagicForJava._
import play.mvc._
import play.data._
import play.api.data.Field
import play.mvc.Http.Context.Implicit._
import views.html._
/**/
object password extends BaseScalaTemplate[play.twirl.api.HtmlFormat.Appendable,Format[play.twirl.api.HtmlFormat.Appendable]](play.twirl.api.HtmlFormat) with play.twirl.api.Template4[Field,String,String,String,play.twirl.api.HtmlFormat.Appendable] {
/**/
def apply/*1.2*/(field: Field, label:String = "CHANGEME", placeholder: String = "", help: String = ""):play.twirl.api.HtmlFormat.Appendable = {
_display_ {
Seq[Any](format.raw/*1.88*/("""
"""),format.raw/*3.3*/("""<div class="form-group """),_display_(/*3.27*/if(field.hasErrors)/*3.46*/ {_display_(Seq[Any](format.raw/*3.48*/("""has-error""")))}),format.raw/*3.58*/("""">
<label class="col-sm-2 control-label">"""),_display_(/*4.44*/label),format.raw/*4.49*/("""</label>
<div class="col-sm-10">
<input
type="password"
class="form-control"
id=""""),_display_(/*9.14*/field/*9.19*/.id),format.raw/*9.22*/(""""
name=""""),_display_(/*10.16*/field/*10.21*/.name),format.raw/*10.26*/(""""
value=""""),_display_(/*11.17*/field/*11.22*/.value.getOrElse("")),format.raw/*11.42*/(""""
placeholder=""""),_display_(/*12.23*/placeholder),format.raw/*12.34*/("""" />
<span class="help-block">"""),_display_(/*13.33*/help),format.raw/*13.37*/("""</span>
<span class="help-block">"""),_display_(/*14.33*/{field.error.map { error => error.message }}),format.raw/*14.77*/("""</span>
</div>
</div>
"""))}
}
def render(field:Field,label:String,placeholder:String,help:String): play.twirl.api.HtmlFormat.Appendable = apply(field,label,placeholder,help)
def f:((Field,String,String,String) => play.twirl.api.HtmlFormat.Appendable) = (field,label,placeholder,help) => apply(field,label,placeholder,help)
def ref: this.type = this
}
/*
-- GENERATED --
DATE: Wed May 20 14:52:28 IDT 2015
SOURCE: /Users/yonizohar/Dropbox/eclipse-ws-for-play/satBasedDecProc/app/views/bootstrap3/password.scala.html
HASH: 3377cf982d1b813c8f30f3ac7dcda09d27d128c0
MATRIX: 757->1|931->87|961->91|1011->115|1038->134|1077->136|1117->146|1189->192|1214->197|1356->313|1369->318|1392->321|1436->338|1450->343|1476->348|1521->366|1535->371|1576->391|1627->415|1659->426|1723->463|1748->467|1815->507|1880->551
LINES: 26->1|29->1|31->3|31->3|31->3|31->3|31->3|32->4|32->4|37->9|37->9|37->9|38->10|38->10|38->10|39->11|39->11|39->11|40->12|40->12|41->13|41->13|42->14|42->14
-- GENERATED --
*/
| yoni206/ducking-octo-wallhack | target/scala-2.11/twirl/main/views/html/bootstrap3/password.template.scala | Scala | mit | 3,052 |
package controllers.parsing
import models.db.build.DbSaveDismissal
import models.DismissalType.fromString
import play.api.libs.json.JsValue
import play.api.Logger
object DismissalParser {
val highfieldDismissalsName = "highfield_dismissals"
val nonHighfieldDismissalsName = "opposition_dismissals"
val batsmanName = "batsman"
val howoutName = "howout"
val bowlerName = "bowler"
val fielderName = "fielder"
def getDismissals(matchJson: JsValue, highfield: Boolean): Seq[DbSaveDismissal] = {
if(highfield) {
Logger.info("Parsing highfield dismissals")
} else {
Logger.info("Parsing non-highfield dismissals")
}
val jsonKey = {
if(highfield) highfieldDismissalsName
else nonHighfieldDismissalsName
}
    val asJsonArray = (matchJson \ jsonKey).as[Seq[JsValue]]
asJsonArray.map(br => parseDismissal(br))
}
def parseDismissal(disJson: JsValue): DbSaveDismissal = {
    val battingPosition = (disJson \ batsmanName).as[Int]
    val howout = (disJson \ howoutName).as[String]
    val bowlerPosition = (disJson \ bowlerName).as[Int]
    val fielderPosition = (disJson \ fielderName).as[Int]
Logger.debug("Parsed dismissal of batsman no " + battingPosition)
Logger.debug("They were " + howout + " with bowler no " + bowlerPosition + " and fielder no " + fielderPosition)
new DbSaveDismissal(
fromString(howout),
bowlerPosition,
battingPosition,
fielderPosition
)
}
} | ishakir/cric-stat | app/controllers/parsing/DismissalParser.scala | Scala | epl-1.0 | 1,535 |
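A hedged sketch of the JSON fragment the parser above expects and how it might be driven; the surrounding match document and the "caught" dismissal string are assumptions (the latter must be something the project's DismissalType.fromString understands):

import play.api.libs.json.Json
import controllers.parsing.DismissalParser

object DismissalParserDemo {
  def main(args: Array[String]): Unit = {
    // Hypothetical match fragment built from the key names declared in DismissalParser.
    val matchJson = Json.parse(
      """{
        |  "highfield_dismissals": [
        |    { "batsman": 1, "howout": "caught", "bowler": 7, "fielder": 3 }
        |  ],
        |  "opposition_dismissals": []
        |}""".stripMargin)
    DismissalParser.getDismissals(matchJson, highfield = true).foreach(println)
  }
}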
/*
* Copyright (C) FuseSource, Inc.
* http://fusesource.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.fabric.monitor
import org.fusesource.fabric.api.monitor.{Monitor, PollerFactory, MonitoredSetDTO}
import internal.IOSupport._
import internal.{ClassFinder, DefaultMonitor}
import java.net.URLClassLoader
import java.util.zip.ZipFile
import java.io._
import org.fusesource.fabric.api.monitor.{XmlCodec, JsonCodec}
import collection.mutable.HashMap
import org.fusesource.fabric.api.monitor.MonitoredSetDTO
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
object MonitorDeamon {
private val DATA_POLLER_FACTORY_RESOURCE = "META-INF/services/org.fusesource.fabric.monitor/poller-factory.index"
private val finder = new ClassFinder(DATA_POLLER_FACTORY_RESOURCE, classOf[PollerFactory])
def poller_factories = finder.singletons
def main(args: Array[String]):Unit = {
var conf:String = null
// parse the command line options..
var remaining = args.toList
while( !remaining.isEmpty ) {
remaining match {
case "--conf" :: value :: tail =>
conf = value
remaining = tail
case _ =>
System.err.println("invalid arguments: "+remaining.mkString(" "))
System.exit(1)
}
}
if( conf==null ) {
System.err.println("The --conf option was not specified.")
System.exit(1)
}
val conf_dir = new File(conf)
if( !conf_dir.isDirectory ) {
System.err.println("The conf setting '%s' is not a directory".format(conf))
System.exit(1)
}
// Unpack the sigar native libs..
unpack_native_libs
// Load the launcher configurations..
val monitor:Monitor = new DefaultMonitor("")
monitor.poller_factories = poller_factories
while(true) {
monitor.configure(load(conf_dir))
Thread.sleep(1000)
}
}
def load(conf_dir:File):Seq[MonitoredSetDTO] = {
conf_dir.listFiles.flatMap { file=>
if( file.isDirectory ) {
None
} else {
try {
// we support both xml and json formats..
if( file.getName.endsWith(".json") ) {
using( new FileInputStream(file)) { is =>
//Some(JsonCodec.decode(classOf[MonitoredSetDTO], is, System.getProperties))
Some(JsonCodec.decode(classOf[MonitoredSetDTO], is))
}
} else if( file.getName.endsWith(".xml") ) {
using( new FileInputStream(file)) { is =>
//Some(XmlCodec.decode(classOf[MonitoredSetDTO], is, System.getProperties))
Some(XmlCodec.decode(classOf[MonitoredSetDTO], is))
}
} else {
None
}
} catch {
case e:Exception =>
e.printStackTrace
println("Invalid monitor configuration file '%s'. Error: %s".format(file, e.getMessage))
None
}
}
}
}
/**
* Sigar expects the native libs to be in the same directory as the sigar jar file.
*/
def unpack_native_libs: Unit = {
getClass.getClassLoader match {
case x: URLClassLoader =>
x.getURLs.foreach {
url =>
val fn = url.getFile
val file = new File(fn)
if (fn.matches(""".*sigar-[^-]+-native.jar""") && file.exists) {
val zip = new ZipFile(file)
val entries = zip.entries
while (entries.hasMoreElements) {
val entry = entries.nextElement
            if (entry.getName.matches(""".*\.dll|.*\.so|.*\.dylib|.*\.sl|.*\.nlm""")) {
val target = new File(file.getParentFile, entry.getName)
if (!target.exists || target.length != entry.getSize) {
try {
using(new FileOutputStream(target)) {
os =>
using(zip.getInputStream(entry)) {
is =>
copy(is, os)
}
}
try {
target.setExecutable(true)
} catch {
case _ => // setExecutable is a java 1.6 method.. ignore if it's not available.
}
}
catch {
case e:Throwable => // We probably don't have write access.. ignore.
}
}
}
}
zip.close
}
}
}
}
}
| Jitendrakry/fuse | fabric/fabric-monitor/src/main/scala/org/fusesource/fabric/monitor/MonitorDeamon.scala | Scala | apache-2.0 | 5,123 |
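A minimal launch sketch; the configuration path is hypothetical. The daemon expects --conf to name a directory of *.json or *.xml MonitoredSetDTO definitions, which it re-reads once a second:

import org.fusesource.fabric.monitor.MonitorDeamon

object MonitorLaunch {
  def main(args: Array[String]): Unit = {
    // Any other argument shape makes the daemon print an error and exit(1).
    MonitorDeamon.main(Array("--conf", "/etc/fabric/monitor"))
  }
}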
package com.taig.tmpltr.markup
import com.taig.tmpltr._
import play.api.mvc.Content
trait caption
extends Tag.Body[caption, Content]
{
val tag = "caption"
} | Taig/Play-Tmpltr | app/com/taig/tmpltr/markup/caption.scala | Scala | mit | 160 |
package ddp_sequencial
import java.io.File
import scala.xml.{Elem, Node, XML}
class GMLDDPReader(xml:Elem) extends DDPReader(xml) {
def producePolygon(xml:Node) = {
xml match {
case s @ <Polygon>{contents @ _*}</Polygon> => {
val posElem = s \\\\ "posList"
val areaIdElem = s \\ "@{http://www.opengis.net/gml/3.2}id"
val polygon = GeoUtil.polygonFromPosList(posElem.text)
new DDPPolygon(areaIdElem.text,polygon.toText())
}
}
}
override def polygons:List[DDPPolygon] = {
    val polygons = for (polygon <- xml \ "surfaceMembers" \ "Polygon")
yield producePolygon(polygon)
polygons.toList
}
} | chandonbrut/lrit-ddp-gml-viz | app/util/GMLDDPReader.scala | Scala | apache-2.0 | 682 |
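A short usage sketch, assuming a GML DDP document on disk (the file name is hypothetical) and the project's DDPReader, GeoUtil and DDPPolygon classes on the classpath:

import scala.xml.XML
import ddp_sequencial.GMLDDPReader

object GMLDDPReaderDemo {
  def main(args: Array[String]): Unit = {
    val doc = XML.loadFile("ddp-areas.gml") // must contain surfaceMembers/Polygon nodes
    val reader = new GMLDDPReader(doc)
    // Each DDPPolygon pairs the polygon's gml:id with its WKT text.
    reader.polygons.foreach(println)
  }
}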
import scala.reflect.runtime.universe._
import scala.reflect.runtime.{universe => ru}
import scala.reflect.runtime.{currentMirror => cm}
import scala.tools.reflect.ToolBox
class C
object Test extends App {
val Block(List(ValDef(_, _, tpt: CompoundTypeTree, _)), _) = reify{ val x: C{} = ??? }.tree
println(tpt)
println(tpt.templ.parents)
println(tpt.templ.self)
println(tpt.templ.body)
}
| som-snytt/dotty | tests/disabled/macro/run/t7235.scala | Scala | apache-2.0 | 400 |
package com.datastax.example.spark.cassandra
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.sql.hive.HiveContext
object AccessingCassandraDataFromSparkInIDE {
def main(args: Array[String]) {
val jars = List("./target/dse-scala-examples-1.0-SNAPSHOT.jar")
val conf = new SparkConf()
.setAppName("Accessing Cassandra Data From Spark (Scala) IDE")
.setMaster("spark://127.0.0.1:7077")
.set("spark.cassandra.connection.host", "127.0.0.1")
.set("spark.cores.max", "3");
conf.setJars(jars)
val sc = new SparkContext(conf);
val hc = new HiveContext(sc)
val data = hc.sql("SELECT * FROM train.user_address")
val results = data.collect()
for (x <- results) {
val addr = x.getString(0);
System.out.println("AddressFromScala is: " + addr);
}
}
} | matt-atwater/DSECodeSamples | ScalaExamples/src/main/scala/com/datastax/example/spark/cassandra/AccessingCassandraDataFromSparkInIDE.scala | Scala | mit | 867 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.cost
import org.apache.calcite.plan.RelOptCost
/**
* A [[RelOptCost]] that extends network cost and memory cost.
*/
trait FlinkCostBase extends RelOptCost {
/**
* @return usage of network resources
*/
def getNetwork: Double
/**
* @return usage of memory resources
*/
def getMemory: Double
}
| ueshin/apache-flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/cost/FlinkCostBase.scala | Scala | apache-2.0 | 1,166 |
package com.intel.analytics.zoo.tutorial
import scopt.OptionParser
object Utils {
case class TestParams(
dimInput: Int = 70,
nHidden: Int = 100,
recordSize: Long = 1e5.toLong,
maxEpoch: Int = 2,
coreNum: Int = 4,
nodeNum: Int = 8,
batchSize: Int = 200
)
val testParser = new OptionParser[TestParams]("BigDL Lenet Test Example") {
opt[Int]('d', "dimInput")
.text("dimension of input")
.action((x, c) => c.copy(dimInput = x))
opt[Int]('h', "nHidden")
.text("Num of hidden layer")
.action((x, c) => c.copy(nHidden = x))
opt[Double]('r', "recordSize")
.text("Total record size")
.action((x, c) => c.copy(recordSize = x.toLong))
opt[Int]('e', "maxEpoch")
.text("maxEpoch")
.action((x, c) => c.copy(maxEpoch = x))
opt[Int]('c', "core")
.text("cores number on each node")
.action((x, c) => c.copy(coreNum = x))
opt[Int]('n', "nodeNumber")
.text("nodes number to train the model")
.action((x, c) => c.copy(nodeNum = x))
opt[Int]('b', "batchSize")
.text("batch size")
.action((x, c) => c.copy(batchSize = x))
}
}
| intel-analytics/BigDL | apps/SimpleMlp/src/main/scala/com/intel/analytics/zoo/tutorial/Utils.scala | Scala | apache-2.0 | 1,324 |
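A sketch of how the scopt parser above is typically driven; the launcher object and the body of the callback are assumptions, while the parse call itself is standard scopt usage:

import com.intel.analytics.zoo.tutorial.Utils._

object SimpleMlpLauncher {
  def main(args: Array[String]): Unit = {
    // Unspecified options fall back to the defaults declared in TestParams.
    testParser.parse(args, TestParams()).foreach { param =>
      println(s"input dim = ${param.dimInput}, hidden = ${param.nHidden}, " +
        s"records = ${param.recordSize}, batch size = ${param.batchSize}")
    }
  }
}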
object Test {
def res(x: quoted.Expr[Int])(using scala.quoted.QuoteContext): quoted.Expr[Int] = x match {
case '{ val a: Int = ${ Foo('{ val b: Int = $y; b }) }; a } => y // owner of y is res
}
object Foo {
def unapply(x: quoted.Expr[Int]): Option[quoted.Expr[Int]] = Some(x)
}
}
| som-snytt/dotty | tests/pos/i6214b.scala | Scala | apache-2.0 | 296 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn.internal
import com.intel.analytics.bigdl.dllib.nn.abstractnn.{AbstractModule, DataFormat}
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.utils.Shape
import scala.reflect.ClassTag
/**
* Cropping layer for 2D input (e.g. picture).
* The input of this layer should be 4D.
*
* When you use this layer as the first layer of a model, you need to provide the argument
* inputShape (a Single Shape, does not include the batch dimension).
*
 * @param heightCrop Int array of length 2. Rows to crop from the top and bottom of the height dimension. Default is (0, 0).
 * @param widthCrop Int array of length 2. Columns to crop from the left and right of the width dimension. Default is (0, 0).
* @param dimOrdering Format of input data. Either DataFormat.NCHW (dimOrdering='th') or
* DataFormat.NHWC (dimOrdering='tf'). Default is NCHW.
* @tparam T Numeric type of parameter(e.g. weight, bias). Only support float/double now.
*/
class Cropping2D[T: ClassTag](
val heightCrop: Array[Int] = Array(0, 0),
val widthCrop: Array[Int] = Array(0, 0),
val dimOrdering: DataFormat = DataFormat.NCHW,
val inputShape: Shape = null)(implicit ev: TensorNumeric[T])
extends KerasLayer[Tensor[T], Tensor[T], T](KerasLayer.addBatch(inputShape)) {
  require(heightCrop.length == 2,
    s"Cropping2D: height cropping values should be of length 2, but got ${heightCrop.length}")
  require(widthCrop.length == 2,
    s"Cropping2D: width cropping values should be of length 2, but got ${widthCrop.length}")
override def doBuild(inputShape: Shape): AbstractModule[Tensor[T], Tensor[T], T] = {
val layer = com.intel.analytics.bigdl.dllib.nn.Cropping2D(
heightCrop = heightCrop,
widthCrop = widthCrop,
format = dimOrdering)
layer.asInstanceOf[AbstractModule[Tensor[T], Tensor[T], T]]
}
}
object Cropping2D {
def apply[@specialized(Float, Double) T: ClassTag](
cropping: ((Int, Int), (Int, Int)) = ((0, 0), (0, 0)),
dimOrdering: String = "th",
inputShape: Shape = null)(implicit ev: TensorNumeric[T]): Cropping2D[T] = {
val heightCrop = Array(cropping._1._1, cropping._1._2)
val widthCrop = Array(cropping._2._1, cropping._2._2)
new Cropping2D[T](heightCrop, widthCrop,
KerasUtils.toBigDLFormat(dimOrdering), inputShape)
}
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/nn/internal/Cropping2D.scala | Scala | apache-2.0 | 3,016 |
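A construction sketch, assuming BigDL's usual Shape(...) factory and the implicit TensorNumeric.NumericFloat are available under the dllib packages (the import paths are assumptions based on the imports in the file above):

import com.intel.analytics.bigdl.dllib.nn.internal.Cropping2D
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric.NumericFloat
import com.intel.analytics.bigdl.dllib.utils.Shape

object Cropping2DExample {
  def main(args: Array[String]): Unit = {
    // Crop 1 row from top and bottom, and 2 columns from left and right, of a 3x28x28 NCHW input.
    val layer = Cropping2D[Float](
      cropping = ((1, 1), (2, 2)),
      dimOrdering = "th",
      inputShape = Shape(3, 28, 28))
    println(layer)
  }
}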
package edu.neu.coe.csye._7200
import org.scalatest.{ FlatSpec, Matchers }
import scala.util._
import edu.neu.coe.csye._7200.trial._
class TrialSpec extends FlatSpec with Matchers {
"First" should """transform "2" into 2""" in {
val trial: Trial[String,Int] = Identity() :| {x: String => Try(x.toInt)}
trial("2") should matchPattern { case Success(2) => }
}
"First with :|" should """transform "2" into 2 and "2.0" into 2.0""" in {
val trial = First[String,Any]{x => Try(x.toInt)} :| {x: String => Try(x.toDouble)}
trial("2") should matchPattern { case Success(2) => }
trial("2.0") should matchPattern { case Success(2.0) => }
}
"First with :| and :|" should """transform "2" into 2, "2.0" into 2.0, and "true" into true""" in {
val trial = First[String,Any]{x => Try(x.toInt)} :| {x: String => Try(x.toDouble)} :| {x: String => Try(x.toBoolean)}
trial("2") should matchPattern { case Success(2) => }
trial("2.0") should matchPattern { case Success(2.0) => }
trial("true") should matchPattern { case Success(true) => }
}
"Identity with :||" should """transform "2" into 2, "2.0" into 2.0, and "true" into true""" in {
val trial = Identity[String,Any]() :|| Seq({x: String => Try(x.toInt)}, {x: String => Try(x.toDouble)}, {x: String => Try(x.toBoolean)})
trial("2") should matchPattern { case Success(2) => }
trial("2.0") should matchPattern { case Success(2.0) => }
trial("true") should matchPattern { case Success(true) => }
}
it should """transform anything into 1""" in {
val trial = Identity[String,Any]() :|| Seq({ _: String => Try(1)}, { _: String => Try(2)}, { _: String => Try(3)})
trial("") should matchPattern { case Success(1) => }
}
"CurriedTrial" should "convert 2 into 5" in {
def addIntToString(x: Int)(s: String): Try[Int] = Try(x+s.toInt)
val three = 3
val trial: String=>Try[Int] = CurriedTrial[Int,String,Int](addIntToString)(three)
trial("2") should matchPattern { case Success(5) => }
}
"CurriedSequence" should "transform anything into 1" in {
def success(n: Int)(s: String): Try[Int] = Success(n)
val gs = Seq(success _, success _, success _)
val ws = Seq(1, 2, 3)
val trial: String=>Try[Int] = CurriedSequence[Int,Int,String,Int](gs)(ws)
trial("") should matchPattern { case Success(1) => }
}
// it should "transform anything into 1 (part two)" in {
// def success1(n: Int)(s: String): Try[Int] = Success(n)
// def success2(n: String)(s: String): Try[Int] = Try(n.toInt)
// val gs = Seq(success1 _, success2 _)
// val ws = Seq(1, "2")
// val trial: String=>Try[Int] = CurriedSequence(gs)(ws)
// trial("") should matchPattern { case Success(1) => }
// }
"Identity" should """fail appropriately, regardless of input""" in {
val trial = Identity[String,Int]()
trial("2") should matchPattern { case Failure(TrialException("identity")) => }
}
it should "combine with trial function to be equivalent of First" in {
val trial = Identity[String,Int]() :| {x: String => Try(x.toInt)}
trial("2") should matchPattern { case Success(2) => }
}
} | rchillyard/Scalaprof | FunctionalProgramming/src/test/scala/edu/neu/coe/csye/_7200/TrialSpec.scala | Scala | gpl-2.0 | 3,127 |
/**
* Copyright 2014 Reverb Technologies, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package pl.matisoft.swagger
import play.api.inject.{Binding, Module}
import play.api.{Configuration, Environment}
class SwaggerModule extends Module {
override def bindings(environment: Environment, configuration: Configuration): Seq[Binding[_]] = Seq(
bind[SwaggerPlugin].toProvider[SwaggerPluginProvider].eagerly(),
bind[ApiHelpController].toSelf.eagerly()
)
}
| noboomu/swagger-play24 | app/pl/matisoft/swagger/SwaggerModule.scala | Scala | apache-2.0 | 988 |
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.global
import java.io.{InputStream, OutputStream, PrintStream}
/**
* Represents the global state for input and output streams used to communicate
* standard input and output.
*/
object StreamState {
private val _baseInputStream = System.in
private val _baseOutputStream = System.out
private val _baseErrorStream = System.err
@volatile private var _inputStream = _baseInputStream
@volatile private var _outputStream = _baseOutputStream
@volatile private var _errorStream = _baseErrorStream
private def init(in: InputStream, out: OutputStream, err: OutputStream) =
synchronized {
System.setIn(in)
Console.setIn(in)
System.setOut(new PrintStream(out))
Console.setOut(out)
System.setErr(new PrintStream(err))
Console.setErr(err)
}
private def reset(): Unit = synchronized {
System.setIn(_baseInputStream)
Console.setIn(_baseInputStream)
System.setOut(_baseOutputStream)
Console.setOut(_baseOutputStream)
System.setErr(_baseErrorStream)
Console.setErr(_baseErrorStream)
}
/**
* Sets the internal streams to be used with the stream block.
*
* @param inputStream The input stream to map standard in
* @param outputStream The output stream to map standard out
* @param errorStream The output stream to map standard err
*/
def setStreams(
inputStream: InputStream = _inputStream,
outputStream: OutputStream = _outputStream,
errorStream: OutputStream = _errorStream
) = {
_inputStream = inputStream
_outputStream = new PrintStream(outputStream)
_errorStream = new PrintStream(errorStream)
}
/**
* Execute code block, mapping all input and output to the provided streams.
*/
def withStreams[T](thunk: => T): T = {
init(_inputStream, _outputStream, _errorStream)
val returnValue = thunk
reset()
returnValue
}
}
| yeghishe/spark-kernel | kernel-api/src/main/scala/com/ibm/spark/global/StreamState.scala | Scala | apache-2.0 | 2,496 |
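A small usage sketch: point the global streams at in-memory buffers, run a block under withStreams, then read back what the block printed (buffer contents and names are illustrative):

import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
import com.ibm.spark.global.StreamState

object StreamStateDemo {
  def main(args: Array[String]): Unit = {
    val in  = new ByteArrayInputStream("unused input".getBytes)
    val out = new ByteArrayOutputStream()
    val err = new ByteArrayOutputStream()

    StreamState.setStreams(in, out, err)
    StreamState.withStreams {
      println("captured, not written to the real console")
    }
    // System.out and Console are restored after the block; the text sits in `out`.
    print(out.toString)
  }
}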
package dresden.crdt
package object json {
// Project-wide typedefs
type ReplicaId = String
}
| jarlopez/dresden | src/main/scala-2.11/dresden/crdt/json/package.scala | Scala | mit | 103 |
package detector
import image._
import file._
class Detector(img:Image) {
  def markFaces(): Image = {
    // Not implemented yet: see the sliding-window outline at the end of this file.
    ???
  }
}
object Detector{
val haarList = File.readLines("features.txt").map(Haar.fromFileString).toList
val sortedFeatures = haarList.sortBy { x => 1.0-x.weight }
  def isFace(original: Image): Boolean = {
    val intImg = new IntegralImage(original)
    // The window counts as a face only if every Haar-like feature tests positive.
    sortedFeatures.forall { _.isFaceFeature(intImg) }
  }
}
// for each scale (multiply by 1.25 at each step)
// for each block of pixels in the image
// if all haar-like features are positive, mark the block
// otherwise move to the next block
| AlexLamson/Face-Detection | src/detector/Detector.scala | Scala | mit | 629 |
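The comment outline above describes the classic multi-scale sliding window. A hedged sketch of that loop follows; the Image methods used (width, height, crop, scaled), the 24-pixel base window and the 1.25 scale step are assumptions, not APIs defined in this repository:

import detector.Detector
import image.Image

object SlidingWindowSketch {
  // Returns the (x, y, scale) of every window the cascade accepts as a face.
  def findFaceWindows(img: Image, windowSize: Int = 24): List[(Int, Int, Double)] = {
    val hits = scala.collection.mutable.ListBuffer.empty[(Int, Int, Double)]
    var scale = 1.0
    while (windowSize * scale <= math.min(img.width, img.height)) {
      val step = (windowSize * scale).toInt
      for (y <- 0 to img.height - step by step; x <- 0 to img.width - step by step) {
        // Hypothetical crop/scaled calls: normalise each block to the detector's window size.
        val window = img.crop(x, y, step, step).scaled(windowSize, windowSize)
        if (Detector.isFace(window)) hits += ((x, y, scale)) // every Haar feature fired
      }
      scale *= 1.25
    }
    hits.toList
  }
}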
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogx.compiler.gpu_operator.function
import cogx.platform.types._
import cogx.platform.types.ElementTypes._
import cogx.compiler.parser.semantics.SemanticError
import scala.collection.mutable.ArrayBuffer
import cogx.compiler.parser.syntaxtree.Field
import cogx.compiler.CompilerError
import cogx.compiler.gpu_operator.statement.{EndAnonymous, AnonymousBlock, Assignment}
import cogx.compiler.gpu_operator.declaration.{ConstantExpression, GPUArrayVariable, NamedVariable, GPUVariable}
import cogx.compiler.gpu_operator.Constraints
import cogx.compiler.gpu_operator.types.GPUType
import cogx.compiler.gpu_operator.expression.{ReadTensorElementExpression, ReadTensorExpression, Operator, GPUExpression}
/** Glue between CPU and GPU. Buffer that holds a tensor field. Logically this
* is a single buffer, but current GPU's tend to have a CPU version and a GPU
* version, with copying between them.
*
* Color images require special handling because they are handled in an
* unexpected way on GPUs. Although color image pixels are typically stored
* as four unsigned bytes packed into a 32-bit word, on the GPU they look
* like float4. We special case this here.
*
* @param field The tensor field.
* @param index The index of the input tensor field, starting from 0.
*/
private[gpu_operator]
class FieldBuffer private(val field: Field, index: Int)
extends SemanticError
with Constraints
with CompilerError
{
/** Unique ID for the buffer. */
val fieldType: FieldType = field.fieldType
/** Type of the tensor held in the field buffer. This is lazily evaluated
* because fields holding big tensors don't have a legitimate tensor type
* (OpenCL and CUDA don't support them). This means you can't execute
* small tensor operators (such as tensorVar and readTensor) on a big
* tensor field.
*/
lazy val tensorType: GPUType = {
fieldType.elementType match {
case Uint8Pixel =>
// We only handle color images for now. Note that 8-bit pixels get
// translated to floats on GPU
GPUType(Float32, 4)
case Float32 =>
fieldType.tensorShape.points match {
case 1 => GPUType(Float32, 1)
case 2 => GPUType(Float32, 2)
case 3 => GPUType(Float32, 3)
case 4 => GPUType(Float32, 4)
case x =>
check(required = false, "tensors too big, max elements = " + MaxTensorSize)
throw new Exception("fatal error")
}
case Complex32 =>
fieldType.tensorShape.points match {
case 1 => GPUType(Float32, 2)
case x =>
check(required = false, "only complex scalar fields supported")
throw new Exception("fatal error")
}
case x =>
throw new Exception("unsupported element type: " + x)
}
}
/** Type of the element held in tensors. Hardcoded since generic fields
* are not yet supported.
*/
val elementType: GPUType =
fieldType.elementType match {
case Uint8Pixel =>
GPUType(Float32, 1)
case Float32 =>
GPUType(Float32, 1)
case Complex32 =>
GPUType(Float32, 2)
case x =>
throw new Exception("unsupported element type: " + x)
}
/** Read a tensor from the field buffer and assign it to a variable.
*
* This uses implicit (_layer, _row, _column) addressing. Note that if
* the field is 0D, though, it reads the single tensor in the field.
*
* @return Variable holding the tensor that was read.
*/
def readTensor(): GPUVariable = {
val variable = GPUVariable(tensorType)
val value = ReadTensorExpression(fieldType, tensorType, index, None)
Assignment(variable, "=", value)
variable
}
/** Read a tensor from a 1D field buffer and assign it to a variable.
*
* @param indices The address of the desired tensor in the field.
* @return Variable holding the tensor that was read.
*/
def readTensor(indices: Array[GPUExpression]): GPUVariable = {
val variable = GPUVariable(tensorType)
// Hyperkernels require hardcoded names for indexing, namely "layer",
// "row" and "column". We declare these using named variables. However,
// since they may have already been declared previously, we declare them
// within an anonymous block to avoid name clashes.
AnonymousBlock()
val value = ReadTensorExpression(fieldType, tensorType, index, Some(indices))
Assignment(variable, "=", value)
EndAnonymous()
variable
}
/** Is the user's tensor element index the special local index designator "_tensorElement"? */
private def isLocalElement(element: GPUExpression) = {
element == ConstantExpression._tensorElement
}
//The next two methods require "tensorElement" for addressing, so we need to create
//an anonymous block to generate the appropriate code.
/** Read a tensor element from the field buffer and assign it to a variable.
*
* This uses implicit (_layer, _row, _column) addressing. Note that if
* the field is 0D, though, it reads the single tensor in the field.
*
* @param element The index of the element in the tensor to read.
* @return Variable holding the tensor that was read.
*/
def readTensorElement(element: GPUExpression): GPUVariable = {
val variable = GPUVariable(elementType)
if (!element.gpuType.isInt)
error("integer expression required for tensor element index")
// The readElement macro in HyperKernels requires a parameter named
// "tensorElement." Since that may have already been defined, we create
// an anonymous block to prevent conflicts.
AnonymousBlock()
val tensorElement = NamedVariable(GPUType(Int32, 1), "tensorElement")
tensorElement := element
val value =
ReadTensorElementExpression(fieldType, elementType, index, None, isLocalElement(element))
Assignment(variable, "=", value)
EndAnonymous()
variable
}
/** Read a tensor element from a 1D field buffer and assign it to a variable.
*
* @param indices The address of the desired tensor in the field.
* @param element The index of the element in the tensor to read.
* @return Variable holding the tensor that was read.
*/
def readTensorElement(indices: Array[GPUExpression],
element: GPUExpression): GPUVariable =
{
if (!element.gpuType.isInt)
error("integer expression required for tensor element index")
val variable = GPUVariable(elementType)
// Hyperkernels require hardcoded names for indexing, namely "layer",
// "row" and "column". We declare these using named variables. However,
// since they may have already been declared previously, we declare them
// within an anonymous block to avoid name clashes.
// The same also applies to the tensor element since the readElement
// macro in HyperKernel requires a parameter named "tensorElement."
AnonymousBlock()
// Create "tensorElement" local variable
val tensorElement = NamedVariable(GPUType(Int32, 1), "tensorElement")
tensorElement := element
val value =
ReadTensorElementExpression(fieldType, elementType, index, Some(indices), isLocalElement(element))
Assignment(variable, "=", value)
EndAnonymous()
variable
}
/** Declare a variable that has the same type as tensors in the field.
*/
def tensorVar(): GPUVariable = {
GPUVariable(tensorType)
}
/** Declare an n-D array that has the same type as tensors in the field.
*
* @param size The dimensionality of the field.
*/
def tensorArray(size: Array[GPUExpression]): GPUArrayVariable = {
if (size.length > MaxArrayDimensions)
error("Too many dimensions for array declaration, max = " +
MaxArrayDimensions)
new GPUArrayVariable(tensorType, size)
}
/** Declare a variable that has the same type as tensor elements in the field.
*/
def tensorElementVar(): GPUVariable = {
new GPUVariable(elementType)
}
/** Declare an n-D array that has the same type as tensor elements in
* the field.
*
* @param size The dimensionality of the field.
*/
def tensorElementArray(size: Array[GPUExpression]): GPUArrayVariable = {
if (size.length > MaxArrayDimensions)
error("Too many dimensions for array declaration, max = " +
MaxArrayDimensions)
new GPUArrayVariable(elementType, size)
}
}
/** Factory for creating field buffers.
*
* This is designed to parse individual GPU functions while
* maintaining separate field buffers for each. When a GPU function has
* been parsed, the `getFieldBuffers` method should be called which
* returns all field buffers that have been allocated for that function.
* This call has the side effect of flushing all field buffers that have
* been created for the current GPU function, thus initializing the world
* for parsing the next GPU function.
*
* This object formerly had the field:
*
* private var buffers = new ArrayBuffer[FieldBuffer]
*
* We now use a thread-local version to enable simultaneous Cog compilation from multiple threads.
*/
private[gpu_operator]
object FieldBuffer {
  /** All field buffers created for the current GPU function. Each thread gets its own instance starting with a zero-length ArrayBuffer. */
private val _buffers = new ThreadLocal[ArrayBuffer[FieldBuffer]] {
override def initialValue() = new ArrayBuffer[FieldBuffer]
}
private def buffers = _buffers.get()
  private def buffers_=(newBuffers: ArrayBuffer[FieldBuffer]) { _buffers.set(newBuffers) }
/** Create a FieldBuffer for a field. If a buffer for that field already
* exists, it is reused.
*
* @param field The field for which a field buffer will be allocated or
* reused
* @return The field buffer for field.
*/
def create(field: Field): FieldBuffer = {
// See if a buffer already exists for the field.
for (buffer <- buffers)
if (buffer.field eq field)
return buffer
// No, need to create a new one.
val id = buffers.length
val fieldBuffer = new FieldBuffer(field, id)
buffers += fieldBuffer
fieldBuffer
}
/** Get all field buffers that have been allocated. Calling this empties
* the internal cache of field buffers.
*/
def getFieldBuffers: Array[FieldBuffer] = {
val bufferArray = buffers.toArray
buffers = new ArrayBuffer[FieldBuffer]
bufferArray
}
}
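// A minimal usage sketch, not part of the original source; `field` stands in for
// whatever Field the parser is currently visiting:
//
//   val buffer  = FieldBuffer.create(field)     // allocates, or reuses the buffer for `field`
//   val tensor  = buffer.readTensor()           // implicit (_layer, _row, _column) addressing
//   val buffers = FieldBuffer.getFieldBuffers   // drains this thread's buffers before the next function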
| hpe-cct/cct-core | src/main/scala/cogx/compiler/gpu_operator/function/FieldBuffer.scala | Scala | apache-2.0 | 11,084 |
/*
Author:
Kristal Curtis
*/
package siren
case class SAMTag(tagName: String, tagType: String, value: String) {
override def toString = tagName + ":" + tagType + ":" + value
}
object SAMTag {
val TAG_REGEX = """([A-Za-z][A-Za-z0-9]):(.+):(.+)""".r
def getSAMTag(t: String): SAMTag = {
val TAG_REGEX(tagName, tagType, value) = t
SAMTag(tagName, tagType, value)
}
}
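// A small, hypothetical illustration (not part of the original file) of the parse and
// round-trip behaviour defined above; "NM:i:5" is just an example SAM optional field.
private object SAMTagExample {
  val nm: SAMTag = SAMTag.getSAMTag("NM:i:5") // SAMTag("NM", "i", "5")
  val text: String = nm.toString              // "NM:i:5"
}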
class SAMTags(tags: List[String]) {
val tagsMap = scala.collection.mutable.Map[String, SAMTag]()
tags.foreach(t => {
val samTag = SAMTag.getSAMTag(t)
tagsMap += ((samTag.tagName, samTag))
})
def getTag(tagName: String): SAMTag = {
tagsMap.get(tagName) match {
case Some(tag) => {
tag
} case None => {
println("Warning: tag " + tagName + " not found.")
null
}
}
}
def addTag(tag: SAMTag) {
tagsMap.get(tag.tagName) match {
case Some(samTag) => {
println("Warning: tag " + samTag.toString + " already exists, and you've tried to add it.")
} case None => {
tagsMap += ((tag.tagName, tag))
}
}
}
def addTag(t: String) {
addTag(SAMTag.getSAMTag(t))
}
def addOrOverwriteTag(tag: SAMTag) {
tagsMap.get(tag.tagName) match {
case Some(samTag) => tagsMap.remove(tag.tagName)
case None => println("Warning: tag " + tag.toString + " does not exist, and you've tried to overwrite it.")
}
addTag(tag)
}
// the only thing that COULD be a problem here is the order... hopefully it's ok in random order.
// if not, I'll have to figure out how to sort them
override def toString = {
var s = ""
tagsMap.keys.foreach(k => {
if (s != "")
        s += "\t"
val t = tagsMap.get(k).get
s += t.toString
})
s
}
} | fnothaft/siren-release | src/main/scala/siren/SAMTags.scala | Scala | bsd-2-clause | 1,784 |
trait Foo {
def a = 1
def b = 1
def c = 1
}
class Bar(foo: Foo) {
export foo.{a => _, b => _, _}
val x1 = a // error
val x2 = b // error
}
| som-snytt/dotty | tests/neg/i8368.scala | Scala | apache-2.0 | 152 |
package persistence.dal
import com.typesafe.scalalogging.LazyLogging
import persistence.entities.{ Job, Jobs }
import slick.driver.JdbcProfile
import utils.DbModule
import scala.concurrent.Future
trait JobsDal {
def save(job: Job): Future[Int]
def getJobs(): Future[Seq[Job]]
def getJobsByTestId(testId: Int): Future[Seq[Job]]
def getJobsByProjId(projId: Int): Future[Seq[Job]]
def getJobById(id: Int): Future[Option[Job]]
def getPastJobs(job: Job): Future[Seq[Job]]
def createTables(): Future[Unit]
}
class JobsDalImpl(implicit val db: JdbcProfile#Backend#Database, implicit val profile: JdbcProfile) extends JobsDal with DbModule with Jobs with LazyLogging {
import profile.api._
override def save(job: Job): Future[Int] = db.run((jobs returning jobs.map(_.id)) += job)
override def getJobs(): Future[Seq[Job]] = db.run(jobs.result)
override def getJobsByTestId(testId: Int): Future[Seq[Job]] = db.run(jobs.filter(_.testId === testId).result)
override def getJobsByProjId(projId: Int): Future[Seq[Job]] = db.run(jobs.filter(_.projId === projId).result)
override def getJobById(id: Int): Future[Option[Job]] = db.run(jobs.filter(_.id === id).result.headOption)
// recent jobs appear first
override def getPastJobs(job: Job): Future[Seq[Job]] = {
db.run(jobs.filter { j =>
j.id =!= job.id &&
j.projId === job.projId &&
j.jobName === job.jobName
}.sortBy(_.id.desc).result)
}
override def createTables(): Future[Unit] = db.run(jobs.schema.create)
}
| ShiftForward/ridgeback | src/main/scala/persistence/dal/JobsDal.scala | Scala | mit | 1,523 |
package maker.utils
import java.io.{OutputStream, File}
import java.io.FileOutputStream
import java.io.PrintStream
import org.apache.commons.io.output.TeeOutputStream
case class TeeToFileOutputStream(file : File, os : OutputStream = Console.out) extends OutputStream {
protected def makeTeeStream = {
new PrintStream(
new TeeOutputStream(
os,
new PrintStream(new FileOutputStream(file))
),
true
)
}
var tee = makeTeeStream
def write(b : Int){
tee.write(b)
}
override def flush(){
tee.flush
os.flush
}
}
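// Hypothetical usage, not part of the original file: tee Console.out into a log file.
//
//   val tee = TeeToFileOutputStream(new File("build.log"))
//   Console.withOut(tee) { println("written to both stdout and build.log") }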
| cage433/maker | maker/src/maker/utils/TeeToFileOutputStream.scala | Scala | bsd-2-clause | 574 |
case class Test1(); case class Test2(); case class Test3();
case class Test4(); case class Test5(); case class Test6();
sealed abstract class DSL {
def cont [P1 >: this.type <: DSL, P2 <: DSL](continuation: => P2) =
Continue[P1, P2](() => this, () => continuation)
}
case class Continue [P1 <: DSL, P2 <: DSL](p1: () => P1, p2: () => P2) extends DSL
trait More[-A] {}
case class Out[C <: More[A], A](c: C, v: A) extends DSL
case class Nop() extends DSL
val decision1:Boolean = true;
val decision2:Boolean = false;
type P[
ChanA <: More[Test1|Test2],
ChanB <: More[Test3|Test4],
ChanC <: More[Test5|Test6]] =
((Out[ChanA,Test1] Continue ((Out[ChanB,Test3] Continue Nop)|(Out[ChanB,Test4] Continue Nop))) //works if remove first 'Continue Nop'
| (Out[ChanA,Test2] Continue ((Out[ChanC,Test5] Continue Nop)|(Out[ChanC,Test6] Continue Nop))))
def p( chanA: More[Test1|Test2], chanB: More[Test3|Test4], chanC: More[Test5|Test6])
:P[chanA.type,chanB.type,chanC.type] ={
if(decision1){
Out(chanA,Test1()) cont {
if(decision2){
Out(chanB,Test3()) cont Nop() //works if replace with 'Out(chanB,Test3())'
}
else{
Out(chanB,Test4()) cont Nop()
}
}
}
else{
Out(chanA,Test2()) cont {
if(decision2){
Out(chanC,Test5()) cont Nop()
}
else{
Out(chanC,Test6()) cont Nop()
}
}
}
} | dotty-staging/dotty | tests/pos/i12141.scala | Scala | apache-2.0 | 1,495 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.scrunch._
| cloudera/crunch | scrunch/src/main/scripts/imports.scala | Scala | apache-2.0 | 836 |
package com.shocktrade.client.contest
import com.shocktrade.client.MySessionService
import io.scalajs.npm.angularjs._
import io.scalajs.npm.angularjs.toaster.Toaster
import io.scalajs.util.JsUnderOrHelper._
import io.scalajs.util.OptionHelper._
import io.scalajs.util.PromiseHelper.Implicits._
import io.scalajs.util.DurationHelper._
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.scalajs.concurrent.JSExecutionContext.Implicits.queue
import scala.scalajs.js
import scala.util.{Failure, Success}
/**
* Margin Account Controller
* @author Lawrence Daniels <[email protected]>
*/
class MarginAccountController($scope: MarginAccountScope, $timeout: Timeout, toaster: Toaster,
@injected("ContestService") contestService: ContestService,
@injected("MySessionService") mySession: MySessionService,
@injected("PortfolioService") portfolioService: PortfolioService) extends Controller {
private val interestRate = 0.15
private val initialMargin = 0.50
private val maintenanceMargin = 0.25
private var investmentMarketValue = 0.0d
/////////////////////////////////////////////////////////////////////
// Initialization Function
/////////////////////////////////////////////////////////////////////
private var attemptsLeft = 3
$scope.initMarginAccount = () => {
investmentMarketValue = investmentCost
for {
portfolioID <- mySession.portfolio_?.flatMap(_._id.toOption)
} {
// load the margin accounts market value
portfolioService.getMarginAccountMarketValue(portfolioID) onComplete {
case Success(response) =>
investmentMarketValue = response.data.marketValue
case Failure(e) =>
toaster.error("Failed to retrieve the Margin Account's market value")
attemptsLeft -= 1
if (attemptsLeft > 0) $timeout(() => $scope.initMarginAccount(), 5.seconds)
}
}
}
/////////////////////////////////////////////////////////////////////
// Public Functions
/////////////////////////////////////////////////////////////////////
$scope.getAsOfDate = () => mySession.marginAccount_?.flatMap(_.asOfDate.toOption) getOrElse new js.Date()
$scope.getBuyingPower = () => funds / initialMargin
$scope.getCashFunds = () => funds
$scope.getInterestPaid = () => mySession.marginAccount_?.flatMap(_.interestPaid.toOption) orZero
$scope.getInterestRate = () => interestRate
$scope.getInitialMargin = () => initialMargin
$scope.getMaintenanceMargin = () => maintenanceMargin
$scope.getInvestmentCost = () => investmentCost
$scope.getInvestmentMarketValue = () => investmentMarketValue
$scope.isAccountInGoodStanding = () => funds >= maintenanceMarginAmount
$scope.getMarginAccountEquity = () => marginAccountEquity
$scope.getMaintenanceMarginAmount = () => maintenanceMarginAmount
// TODO round to nearest penny
$scope.getMarginCallAmount = () => maintenanceMarginAmount - funds
/////////////////////////////////////////////////////////////////////
// Private Functions
/////////////////////////////////////////////////////////////////////
private def funds = mySession.marginAccount_?.flatMap(_.funds.toOption) orZero
private def investmentCost = {
val outcome = for {
portfolio <- mySession.portfolio_?.toList
positions <- portfolio.positions.toList
marginPositions = positions.filter(_.isMarginAccount)
} yield marginPositions.map(_.totalCost.orZero).sum
outcome.sum
}
private def marginAccountEquity = {
val myInvestmentCost = investmentCost
funds + (Math.max(investmentMarketValue, myInvestmentCost) - myInvestmentCost)
}
private def maintenanceMarginAmount = {
val maintenanceAmount = (investmentCost - marginAccountEquity) * maintenanceMargin
if (maintenanceAmount > 0) maintenanceAmount else 0.0d
}
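  // Worked example with made-up numbers (not from the original source): with funds = 25,000,
  // investmentCost = 100,000 and investmentMarketValue = 90,000, the equity is
  // 25,000 + (max(90,000, 100,000) - 100,000) = 25,000, the maintenance requirement is
  // (100,000 - 25,000) * 0.25 = 18,750, the account is in good standing (25,000 >= 18,750),
  // and buying power is 25,000 / 0.50 = 50,000.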
}
/**
* Margin Account Scope
* @author Lawrence Daniels <[email protected]>
*/
@js.native
trait MarginAccountScope extends Scope {
// functions
var initMarginAccount: js.Function0[Unit] = js.native
var getAsOfDate: js.Function0[js.Date] = js.native
var getBuyingPower: js.Function0[Double] = js.native
var getCashFunds: js.Function0[Double] = js.native
var getInterestPaid: js.Function0[Double] = js.native
var getInterestRate: js.Function0[Double] = js.native
var getInitialMargin: js.Function0[Double] = js.native
var getMaintenanceMargin: js.Function0[Double] = js.native
var getInvestmentCost: js.Function0[Double] = js.native
var getInvestmentMarketValue: js.Function0[Double] = js.native
var isAccountInGoodStanding: js.Function0[Boolean] = js.native
var getMarginAccountEquity: js.Function0[Double] = js.native
var getMaintenanceMarginAmount: js.Function0[Double] = js.native
var getMarginCallAmount: js.Function0[Double] = js.native
}
| ldaniels528/shocktrade.js | app/client/angularjs/src/main/scala/com/shocktrade/client/contest/MarginAccountController.scala | Scala | apache-2.0 | 4,965 |
/*
* Copyright 2014 Databricks
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.databricks.spark.csv
import java.io.File
import java.nio.charset.UnsupportedCharsetException
import org.apache.hadoop.io.compress.GzipCodec
import org.apache.spark.sql.Row
import org.apache.spark.sql.test._
import org.apache.spark.SparkException
import org.apache.spark.sql.types._
import org.scalatest.FunSuite
/* Implicits */
import TestSQLContext._
class CsvSuite extends FunSuite {
val carsFile = "src/test/resources/cars.csv"
val carsFile8859 = "src/test/resources/cars_iso-8859-1.csv"
val carsTsvFile = "src/test/resources/cars.tsv"
val carsAltFile = "src/test/resources/cars-alternative.csv"
val nullNumbersFile = "src/test/resources/null-numbers.csv"
val emptyFile = "src/test/resources/empty.csv"
val escapeFile = "src/test/resources/escape.csv"
val tempEmptyDir = "target/test/empty/"
val commentsFile = "src/test/resources/comments.csv"
val disableCommentsFile = "src/test/resources/disable_comments.csv"
val numCars = 3
test("DSL test") {
val results = TestSQLContext
.csvFile(carsFile)
.select("year")
.collect()
assert(results.size === numCars)
}
test("DSL test for iso-8859-1 encoded file") {
val dataFrame = new CsvParser()
.withUseHeader(true)
.withCharset("iso-8859-1")
.withDelimiter('þ')
.csvFile(TestSQLContext, carsFile8859)
assert(dataFrame.select("year").collect().size === numCars)
val results = dataFrame.select("comment", "year").where(dataFrame("year") === "1997")
assert(results.first.getString(0) === "Go get one now they are þoing fast")
}
test("DSL test for bad charset name") {
val parser = new CsvParser()
.withUseHeader(true)
.withCharset("1-9588-osi")
val exception = intercept[UnsupportedCharsetException] {
parser.csvFile(TestSQLContext, carsFile)
.select("year")
.collect()
}
assert(exception.getMessage.contains("1-9588-osi"))
}
test("DDL test") {
sql(
s"""
|CREATE TEMPORARY TABLE carsTable
|USING com.databricks.spark.csv
|OPTIONS (path "$carsFile", header "true")
""".stripMargin.replaceAll("\\n", " "))
assert(sql("SELECT year FROM carsTable").collect().size === numCars)
}
test("DDL test with tab separated file") {
sql(
s"""
|CREATE TEMPORARY TABLE carsTable
|USING com.databricks.spark.csv
         |OPTIONS (path "$carsTsvFile", header "true", delimiter "\t")
""".stripMargin.replaceAll("\\n", " "))
assert(sql("SELECT year FROM carsTable").collect().size === numCars)
}
test("DDL test parsing decimal type") {
sql(
s"""
|CREATE TEMPORARY TABLE carsTable
|(yearMade double, makeName string, modelName string, priceTag decimal,
| comments string, grp string)
|USING com.databricks.spark.csv
         |OPTIONS (path "$carsTsvFile", header "true", delimiter "\t")
""".stripMargin.replaceAll("\\n", " "))
assert(sql("SELECT yearMade FROM carsTable").collect().size === numCars)
assert(sql("SELECT makeName FROM carsTable where priceTag > 60000").collect().size === 1)
}
test("DSL test for DROPMALFORMED parsing mode") {
val results = new CsvParser()
.withParseMode("DROPMALFORMED")
.withUseHeader(true)
.csvFile(TestSQLContext, carsFile)
.select("year")
.collect()
assert(results.size === numCars - 1)
}
test("DSL test for FAILFAST parsing mode") {
val parser = new CsvParser()
.withParseMode("FAILFAST")
.withUseHeader(true)
val exception = intercept[SparkException]{
parser.csvFile(TestSQLContext, carsFile)
.select("year")
.collect()
}
assert(exception.getMessage.contains("Malformed line in FAILFAST mode"))
}
test("DSL test with alternative delimiter and quote") {
val results = new CsvParser()
.withDelimiter('|')
      .withQuoteChar('\'')
.withUseHeader(true)
.csvFile(TestSQLContext, carsAltFile)
.select("year")
.collect()
assert(results.size === numCars)
}
test("DSL test with alternative delimiter and quote using sparkContext.csvFile") {
val results =
      TestSQLContext.csvFile(carsAltFile, useHeader = true, delimiter = '|', quote = '\'')
.select("year")
.collect()
assert(results.size === numCars)
}
test("Expect parsing error with wrong delimiter setting using sparkContext.csvFile") {
intercept[ org.apache.spark.sql.AnalysisException] {
      TestSQLContext.csvFile(carsAltFile, useHeader = true, delimiter = ',', quote = '\'')
.select("year")
.collect()
}
}
test("Expect wrong parsing results with wrong quote setting using sparkContext.csvFile") {
val results =
TestSQLContext.csvFile(carsAltFile, useHeader = true, delimiter = '|', quote = '"')
.select("year")
.collect()
assert(results.slice(0, numCars).toSeq.map(_(0).asInstanceOf[String]) ==
Seq("'2012'", "1997", "2015"))
}
test("DDL test with alternative delimiter and quote") {
sql(
s"""
|CREATE TEMPORARY TABLE carsTable
|USING com.databricks.spark.csv
|OPTIONS (path "$carsAltFile", header "true", quote "'", delimiter "|")
""".stripMargin.replaceAll("\\n", " "))
assert(sql("SELECT year FROM carsTable").collect().size === numCars)
}
test("DDL test with charset") {
sql(
s"""
|CREATE TEMPORARY TABLE carsTable
|USING com.databricks.spark.csv
|OPTIONS (path "$carsFile8859", header "true", delimiter "þ", charset "iso-8859-1")
""".stripMargin.replaceAll("\\n", " "))
assert(sql("SELECT year FROM carsTable").collect().size === numCars)
}
test("DSL test with empty file and known schema") {
val results = new CsvParser()
.withSchema(StructType(List(StructField("column", StringType, false)))).withUseHeader(false)
.csvFile(TestSQLContext, emptyFile)
.count()
assert(results === 0)
}
test("DDL test with empty file") {
sql(s"""
|CREATE TEMPORARY TABLE carsTable
|(yearMade double, makeName string, modelName string, comments string, grp string)
|USING com.databricks.spark.csv
|OPTIONS (path "$emptyFile", header "false")
""".stripMargin.replaceAll("\\n", " "))
assert(sql("SELECT count(*) FROM carsTable").collect().head(0) === 0)
}
test("DDL test with schema") {
sql(s"""
|CREATE TEMPORARY TABLE carsTable
|(yearMade double, makeName string, modelName string, comments string, grp string)
|USING com.databricks.spark.csv
|OPTIONS (path "$carsFile", header "true")
""".stripMargin.replaceAll("\\n", " "))
assert(sql("SELECT makeName FROM carsTable").collect().size === numCars)
assert(sql("SELECT avg(yearMade) FROM carsTable where grp = '' group by grp")
.collect().head(0) === 2004.5)
}
test("DSL column names test") {
val cars = new CsvParser()
.withUseHeader(false)
.csvFile(TestSQLContext, carsFile)
assert(cars.schema.fields(0).name == "C0")
assert(cars.schema.fields(2).name == "C2")
}
test("SQL test insert overwrite") {
// Create a temp directory for table that will be overwritten
TestUtils.deleteRecursively(new File(tempEmptyDir))
new File(tempEmptyDir).mkdirs()
sql(
s"""
|CREATE TEMPORARY TABLE carsTableIO
|USING com.databricks.spark.csv
|OPTIONS (path "$carsFile", header "true")
""".stripMargin.replaceAll("\\n", " "))
sql(s"""
|CREATE TEMPORARY TABLE carsTableEmpty
|(yearMade double, makeName string, modelName string, comments string, grp string)
|USING com.databricks.spark.csv
|OPTIONS (path "$tempEmptyDir", header "false")
""".stripMargin.replaceAll("\\n", " "))
assert(sql("SELECT * FROM carsTableIO").collect().size === numCars)
assert(sql("SELECT * FROM carsTableEmpty").collect().isEmpty)
sql(
s"""
|INSERT OVERWRITE TABLE carsTableEmpty
|SELECT * FROM carsTableIO
""".stripMargin.replaceAll("\\n", " "))
assert(sql("SELECT * FROM carsTableEmpty").collect().size == numCars)
}
test("DSL save") {
// Create temp directory
TestUtils.deleteRecursively(new File(tempEmptyDir))
new File(tempEmptyDir).mkdirs()
val copyFilePath = tempEmptyDir + "cars-copy.csv"
val cars = TestSQLContext.csvFile(carsFile)
cars.saveAsCsvFile(copyFilePath, Map("header" -> "true"))
val carsCopy = TestSQLContext.csvFile(copyFilePath + "/")
assert(carsCopy.count == cars.count)
assert(carsCopy.collect.map(_.toString).toSet == cars.collect.map(_.toString).toSet)
}
test("DSL save with a compression codec") {
// Create temp directory
TestUtils.deleteRecursively(new File(tempEmptyDir))
new File(tempEmptyDir).mkdirs()
val copyFilePath = tempEmptyDir + "cars-copy.csv"
val cars = TestSQLContext.csvFile(carsFile)
cars.saveAsCsvFile(copyFilePath, Map("header" -> "true"), classOf[GzipCodec])
val carsCopy = TestSQLContext.csvFile(copyFilePath + "/")
assert(carsCopy.count == cars.count)
assert(carsCopy.collect.map(_.toString).toSet == cars.collect.map(_.toString).toSet)
}
test("DSL save with quoting") {
// Create temp directory
TestUtils.deleteRecursively(new File(tempEmptyDir))
new File(tempEmptyDir).mkdirs()
val copyFilePath = tempEmptyDir + "cars-copy.csv"
val cars = TestSQLContext.csvFile(carsFile)
    cars.saveAsCsvFile(copyFilePath, Map("header" -> "true", "quote" -> "\""))
val carsCopy = TestSQLContext.csvFile(copyFilePath + "/")
assert(carsCopy.count == cars.count)
assert(carsCopy.collect.map(_.toString).toSet == cars.collect.map(_.toString).toSet)
}
test("DSL save with alternate quoting") {
// Create temp directory
TestUtils.deleteRecursively(new File(tempEmptyDir))
new File(tempEmptyDir).mkdirs()
val copyFilePath = tempEmptyDir + "cars-copy.csv"
val cars = TestSQLContext.csvFile(carsFile)
cars.saveAsCsvFile(copyFilePath, Map("header" -> "true", "quote" -> "!"))
val carsCopy = TestSQLContext.csvFile(copyFilePath + "/", quote = '!')
assert(carsCopy.count == cars.count)
assert(carsCopy.collect.map(_.toString).toSet == cars.collect.map(_.toString).toSet)
}
test("DSL save with quoting, escaped quote") {
// Create temp directory
TestUtils.deleteRecursively(new File(tempEmptyDir))
new File(tempEmptyDir).mkdirs()
val copyFilePath = tempEmptyDir + "escape-copy.csv"
val escape = TestSQLContext.csvFile(escapeFile, escape='|', quote='"')
    escape.saveAsCsvFile(copyFilePath, Map("header" -> "true", "quote" -> "\""))
val escapeCopy = TestSQLContext.csvFile(copyFilePath + "/")
assert(escapeCopy.count == escape.count)
assert(escapeCopy.collect.map(_.toString).toSet == escape.collect.map(_.toString).toSet)
    assert(escapeCopy.head().getString(0) == "\"thing")
}
test("DSL test schema inferred correctly") {
val results = new CsvParser()
.withInferSchema(true)
.withUseHeader(true)
.csvFile(TestSQLContext, carsFile)
assert(results.schema == StructType(List(
StructField("year",IntegerType,true),
StructField("make",StringType,true),
StructField("model",StringType,true),
StructField("comment",StringType,true),
StructField("blank",StringType,true))
))
assert(results.collect().size === numCars)
}
test("DSL test inferred schema passed through") {
val dataFrame = TestSQLContext
.csvFile(carsFile, inferSchema = true)
val results = dataFrame
.select("comment", "year")
.where(dataFrame("year") === 2012)
assert(results.first.getString(0) === "No comment")
assert(results.first.getInt(1) === 2012)
}
test("DDL test with inferred schema") {
sql(
s"""
|CREATE TEMPORARY TABLE carsTable
|USING com.databricks.spark.csv
|OPTIONS (path "$carsFile", header "true", inferSchema "true")
""".stripMargin.replaceAll("\\n", " "))
val results = sql("select year from carsTable where make = 'Ford'")
assert(results.first().getInt(0) === 1997)
}
test("DSL test nullable fields"){
val results = new CsvParser()
.withSchema(StructType(List(StructField("name", StringType, false), StructField("age", IntegerType, true))))
.withUseHeader(true)
.csvFile(TestSQLContext, nullNumbersFile)
.collect()
assert(results.head.toSeq == Seq("alice", 35))
assert(results(1).toSeq == Seq("bob", null))
assert(results(2).toSeq == Seq("", 24))
}
test("Commented lines in CSV data") {
val results: Array[Row] = new CsvParser()
.withDelimiter(',')
.withComment('~')
.csvFile(TestSQLContext, commentsFile)
.collect()
val expected =
Seq(Seq("1", "2", "3", "4", "5"),
Seq("6", "7", "8", "9", "0"),
Seq("1", "2", "3", "4", "5"))
assert(results.toSeq.map(_.toSeq) == expected)
}
  test("Setting comment to null disables comment support") {
val results: Array[Row] = new CsvParser()
.withDelimiter(',')
.withComment(null)
.csvFile(TestSQLContext, disableCommentsFile)
.collect()
val expected =
Seq(
Seq("#1", "2", "3"),
Seq("4", "5", "6"))
assert(results.toSeq.map(_.toSeq) == expected)
}
test("DSL load csv from rdd") {
val csvRdd = TestSQLContext.sparkContext.parallelize(Seq("age,height", "20,1.8", "16,1.7"))
val df = new CsvParser().withUseHeader(true).csvRdd(TestSQLContext, csvRdd).collect()
assert(df(0).toSeq == Seq("20", "1.8"))
assert(df(1).toSeq == Seq("16", "1.7"))
}
test("Inserting into csvRdd should throw exception"){
val csvRdd = TestSQLContext.sparkContext.parallelize(Seq("age,height", "20,1.8", "16,1.7"))
val sampleData = TestSQLContext.sparkContext.parallelize(Seq("age,height", "20,1.8", "16,1.7"))
val df = new CsvParser().withUseHeader(true).csvRdd(TestSQLContext, csvRdd)
val sampleDf = new CsvParser().withUseHeader(true).csvRdd(TestSQLContext, sampleData)
df.registerTempTable("csvRdd")
sampleDf.registerTempTable("sampleDf")
val exception = intercept[java.io.IOException] {
sql("INSERT OVERWRITE TABLE csvRdd select * from sampleDf")
}
assert(exception.getMessage.contains("Cannot INSERT into table with no path defined"))
}
test("DSL tsv test") {
val results = TestSQLContext
.tsvFile(carsTsvFile)
.select("year")
.collect()
assert(results.size === numCars)
}
} | karenyyng/spark-csv | src/test/scala/com/databricks/spark/csv/CsvSuite.scala | Scala | apache-2.0 | 15,283 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.aliyun.udfs.tablestore
import org.apache.hadoop.hive.ql.exec.{Description, UDF}
import org.apache.spark.internal.Logging
@Description(
name = ResolveTableStoreBinlogUDF.name,
value = ResolveTableStoreBinlogUDF.value,
extended = ResolveTableStoreBinlogUDF.extendedValue
)
class ResolveTableStoreBinlogUDF extends UDF with Logging {
def evaluate(col: Long, columnType: String): Long = {
ResolveTableStoreBinlogUDF.getActualValue[Long](col, columnType)
}
def evaluate(col: String, columnType: String): String = {
ResolveTableStoreBinlogUDF.getActualValue[String](col, columnType)
}
def evaluate(col: Double, columnType: String): Double = {
ResolveTableStoreBinlogUDF.getActualValue[Double](col, columnType)
}
def evaluate(col: Boolean, columnType: String): Boolean = {
ResolveTableStoreBinlogUDF.getActualValue[Boolean](col, columnType)
}
def evaluate(col: Array[Byte], columnType: String): Array[Byte] = {
ResolveTableStoreBinlogUDF.getActualValue[Array[Byte]](col, columnType)
}
}
object ResolveTableStoreBinlogUDF {
final val name = "ots_col_parser"
final val value = "_FUNC_(<ColumnName>, __ots_column_type_<ColumnName>) " +
"- return the actual column value of tablestore binlog."
final val extendedValue =
// scalastyle:off
"""
|The change data from tablestore has two parts: Predefined columns and user defined columns.
| Predefined Columns:
| __ots_record_type__ (STRING): The record type of the change data, valid in (PUT, UPDATE, DELETE).
| __ots_record_timestamp__ (LONG): The record timestamp of the change data, in nanosecond.
| __ots_column_type_<ColumnName> (STRING):
| The operation of the column in change data, valid in (PUT, DELETE_ONE_VERSION, DELETE_ALL_VERSION).
|
| User defined columns:
| The user defined schema in DataStreamReader(in option catalog).
|
| Example:
| Suppose user defined 7 columns: PkString, PkInt, col_long, col_string, col_binary, col_double, col_boolean
| > select __ots_record_type__ AS RecordType, __ots_record_timestamp__ AS RecordTimestamp, PkString, PkInt,
| ots_col_parser(col_string, __ots_column_type_col_string) AS col_string,
| ots_col_parser(col_long, __ots_column_type_col_long) AS col_long,
| ots_col_parser(col_binary, __ots_column_type_col_binary) AS col_binary,
| ots_col_parser(col_double, __ots_column_type_col_double) AS col_double,
| ots_col_parser(col_boolean, __ots_column_type_col_boolean) AS col_boolean FROM stream_view;
| PUT 1595990621936075 00008 1595990621 str1 123456 null 3.1415 true
|
"""
// scalastyle:on
  // Get the actual value according to the column operation type.
def getActualValue[T](origValue: T, columnType: String): T = {
columnType match {
case "PUT" => origValue
case "DELETE_ONE_VERSION" => null.asInstanceOf[T]
case "DELETE_ALL_VERSION" => null.asInstanceOf[T]
case _ => null.asInstanceOf[T]
}
}
}
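// A tiny, hypothetical illustration (not part of the original file): a PUT column keeps
// its value, while either delete marker collapses to null.
private object ResolveTableStoreBinlogUDFExample {
  val kept: String = ResolveTableStoreBinlogUDF.getActualValue("str1", "PUT") // "str1"
  val dropped: String = ResolveTableStoreBinlogUDF.getActualValue("str1", "DELETE_ONE_VERSION") // null
}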
| aliyun/aliyun-emapreduce-sdk | emr-sql/src/main/scala/org/apache/spark/sql/aliyun/udfs/tablestore/ResolveTableStoreBinlogUDF.scala | Scala | artistic-2.0 | 3,896 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import org.apache.kafka.common.config.SaslConfigs
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.apache.kafka.common.errors.{GroupAuthorizationException, TopicAuthorizationException}
import org.junit.{Before, Test}
import org.junit.Assert.{assertEquals, assertTrue}
import org.scalatest.Assertions.fail
import scala.collection.immutable.List
import scala.jdk.CollectionConverters._
abstract class SaslEndToEndAuthorizationTest extends EndToEndAuthorizationTest {
override protected def securityProtocol = SecurityProtocol.SASL_SSL
override protected val serverSaslProperties = Some(kafkaServerSaslProperties(kafkaServerSaslMechanisms, kafkaClientSaslMechanism))
override protected val clientSaslProperties = Some(kafkaClientSaslProperties(kafkaClientSaslMechanism))
protected def kafkaClientSaslMechanism: String
protected def kafkaServerSaslMechanisms: List[String]
@Before
override def setUp(): Unit = {
// create static config including client login context with credentials for JaasTestUtils 'client2'
startSasl(jaasSections(kafkaServerSaslMechanisms, Option(kafkaClientSaslMechanism), Both))
// set dynamic properties with credentials for JaasTestUtils 'client1' so that dynamic JAAS configuration is also
// tested by this set of tests
val clientLoginContext = jaasClientLoginModule(kafkaClientSaslMechanism)
producerConfig.put(SaslConfigs.SASL_JAAS_CONFIG, clientLoginContext)
consumerConfig.put(SaslConfigs.SASL_JAAS_CONFIG, clientLoginContext)
adminClientConfig.put(SaslConfigs.SASL_JAAS_CONFIG, clientLoginContext)
super.setUp()
}
/**
* Test with two consumers, each with different valid SASL credentials.
* The first consumer succeeds because it is allowed by the ACL,
* the second one connects ok, but fails to consume messages due to the ACL.
*/
@Test(timeout = 15000)
def testTwoConsumersWithDifferentSaslCredentials(): Unit = {
setAclsAndProduce(tp)
val consumer1 = createConsumer()
// consumer2 retrieves its credentials from the static JAAS configuration, so we test also this path
consumerConfig.remove(SaslConfigs.SASL_JAAS_CONFIG)
consumerConfig.remove(SaslConfigs.SASL_CLIENT_CALLBACK_HANDLER_CLASS)
val consumer2 = createConsumer()
consumer1.assign(List(tp).asJava)
consumer2.assign(List(tp).asJava)
consumeRecords(consumer1, numRecords)
try {
consumeRecords(consumer2)
fail("Expected exception as consumer2 has no access to topic or group")
} catch {
// Either exception is possible depending on the order that the first Metadata
// and FindCoordinator requests are received
case e: TopicAuthorizationException => assertTrue(e.unauthorizedTopics.contains(topic))
case e: GroupAuthorizationException => assertEquals(group, e.groupId)
}
confirmReauthenticationMetrics
}
}
| sslavic/kafka | core/src/test/scala/integration/kafka/api/SaslEndToEndAuthorizationTest.scala | Scala | apache-2.0 | 3,730 |
package com.richardchankiyin.os
import org.scalatest.FlatSpec
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
import scala.concurrent.duration.Duration
class SchedulerJobTest extends FlatSpec{
val logger = Logger(LoggerFactory.getLogger(this.getClass))
val osname = System.getProperty("os.name").toLowerCase()
  logger.debug("osname.indexOf(\"win\"): {}", {osname.indexOf("win").toString()})
val isWin = osname.indexOf("win") >= 0
logger.debug("isWin: {} osname: {}", {isWin.toString()}, {osname})
val timeoutCommand = ("timeout 5","sleep 5")
val successCommand = ("java -version","java -version")
val failCommand = ("java -a", "java -a")
def commandToBeTested(isWinEnv:Boolean,commands:(String,String)):String = {
if (isWinEnv)
commands._1
else
commands._2
}
"Schedule Job" should "run handleSuccess" in {
val job = new ScheduleJob("", "success testing", commandToBeTested(isWin,successCommand), 10 seconds
, ()=>{logger.debug("OK")}, (t)=>{fail("unexpected")})
job.execute
}
"Schedule Job" should "run handleFailure" in {
val job = new ScheduleJob("", "failed testing", commandToBeTested(isWin,failCommand), 10 seconds
, ()=>{fail("unexpected!")}, (t)=>{logger.debug("OK")})
job.execute
}
"Schedule Job" should "run handleFailure in timeout" in {
val job = new ScheduleJob("", "timeout testing", commandToBeTested(isWin,timeoutCommand), 1 milliseconds
        , ()=>{fail("unexpected!")}, (t)=>{logger.debug("OK")})
    job.execute
  }
} | richardchankiyin/sysdashboard | HealthCheck/src/test/scala/com/richardchankiyin/os/SchedulerJobTest.scala | Scala | gpl-3.0 | 1,589 |
package controllers
import javax.inject.Inject
import play.api.mvc.{BaseController, ControllerComponents}
import views.About
import scala.concurrent.ExecutionContext
/**
*
* @author ponkotuy
* Date: 14/10/11.
 */
class ViewAbout @Inject()(val controllerComponents: ControllerComponents, implicit val ec: ExecutionContext) extends BaseController {
import controllers.Common._
def setup = actionAsync { Redirect(About.Top) }
def changeLog = actionAsync { Redirect(About.ChangeLog) }
def faq = actionAsync { Redirect(About.Faq) }
def setupDetail = actionAsync { Redirect(About.SetupDetail) }
}
| ttdoda/MyFleetGirls | server/app/controllers/ViewAbout.scala | Scala | mit | 610 |
/*
* Copyright (C) 2009-2013 Mathias Doenitz, Alexander Myltsev
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s.internal.parboiled2
import org.specs2.mutable.Specification
import org.specs2.ScalaCheck
import org.scalacheck.{Gen, Prop}
class CharUtilsSpec extends Specification with ScalaCheck {
val hexChars = for (i ← Gen.choose(0, 15)) yield i -> Integer.toHexString(i).charAt(0)
"CharUtils" >> {
"hexValue" in {
val p = Prop.forAll(hexChars) { case (i, c) ⇒ CharUtils.hexValue(c) == i }
check(p, defaultParameters, defaultFreqMapPretty)
}
"numberOfHexDigits" in prop {
l: Long ⇒ CharUtils.numberOfHexDigits(l) === java.lang.Long.toHexString(l).length
}
"upperHexString" in prop {
l: Long ⇒ CharUtils.upperHexString(l) === java.lang.Long.toHexString(l).toUpperCase
}
"lowerHexString" in prop {
l: Long ⇒ CharUtils.lowerHexString(l) === java.lang.Long.toHexString(l)
}
"numberOfDecimalDigits" in prop {
l: Long ⇒ CharUtils.numberOfDecimalDigits(l) === java.lang.Long.toString(l).length
}
"signedDecimalString" in prop {
l: Long ⇒ CharUtils.signedDecimalString(l) === java.lang.Long.toString(l)
}
}
}
| ZizhengTai/http4s | parboiled2/src/test/scala/org/http4s/internal/parboiled2/CharUtilsSpec.scala | Scala | apache-2.0 | 1,746 |
package net.liftweb.util
/*
* Copyright 2007-2009 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
*/
import _root_.java.net.InetAddress
import _root_.java.util.Properties
import Helpers._
/**
* Configuration management utilities.
*
* If you want to provide a configuration file for a subset of your application
* or for a specifig environment, Lift expects configuration files to be named
* in a manner relating to the context in which they are being used. The standard
* name format is:
*
* <pre>
* modeName.hostName.userName.filename.extension
* </pre>
*
* with hostName and userName being optional, and modeName being one of
* "test", "staging", "production", "pilot", "profile", or "default.
* The standard Lift properties file extension is "props".
*/
object Props {
/**
* Get the configuration property value for the specified key.
* @param name key for the property to get
* @return the value of the property if defined
*/
def get(name: String): Box[String] = Box(props.get(name))
// def apply(name: String): String = props(name)
def getInt(name: String): Box[Int] = get(name).map(toInt) // toInt(props.get(name))
def getInt(name: String, defVal: Int): Int = getInt(name) openOr defVal // props.get(name).map(toInt(_)) getOrElse defVal
def getLong(name: String): Box[Long] = props.get(name).flatMap(asLong)
def getLong(name: String, defVal: Long): Long = getLong(name) openOr defVal // props.get(name).map(toLong(_)) getOrElse defVal
def getBool(name: String): Box[Boolean] = props.get(name).map(toBoolean) // (props.get(name))
def getBool(name: String, defVal: Boolean): Boolean = getBool(name) openOr defVal // props.get(name).map(toBoolean(_)) getOrElse defVal
def get(name: String, defVal: String) = props.get(name) getOrElse defVal
/**
* Determine whether the specified properties exist.
* @param what the properties to test
* @return the subset of strings in 'what' that do not correspond to
* keys for available properties.
*/
def require(what: String*) = what.filter(!props.contains(_))
/**
* Ensure that all of the specified properties exist; throw an exception if
* any of the specified values are not keys for available properties.
*/
def requireOrDie(what: String*) {
require(what :_*).toList match {
case Nil =>
case bad => throw new Exception("The following required properties are not defined: "+bad.mkString(","))
}
}
/**
* Enumeration of available run modes.
*/
object RunModes extends Enumeration {
val Development = Value(1, "Development")
val Test = Value(2, "Test")
val Staging = Value(3, "Staging")
val Production = Value(4, "Production")
val Pilot = Value(5, "Pilot")
val Profile = Value(6, "Profile")
}
import RunModes._
val propFileName = "lift.props"
val fileName = "lift.props"
/**
* The mode for which to retrieve properties, retrieved by System.getProperty("run.mode").
* Recognized modes are "development", "test", "profile", "pilot", "staging" and "production"
* with the default run mode being development.
*/
lazy val mode = Box.legacyNullTest((System.getProperty("run.mode"))).map(_.toLowerCase) match {
case Full("test") => Test
case Full("production") => Production
case Full("staging") => Staging
case Full("pilot") => Pilot
case Full("profile") => Profile
case _ => Development
}
/**
* The resource path segment corresponding to the current mode.
*/
lazy val modeName = mode match {
case Test => "test."
case Staging => "staging."
case Production => "production."
case Pilot => "pilot."
case Profile => "profile."
case _ => ""
}
/**
* The resource path segment corresponding to the current system user
* (from System.getProperty("user.name"))
*/
lazy val userName = System.getProperty("user.name") + "."
/**
* Is the app running in the Google App engine (the System property in.gae.j is set)
*/
lazy val inGAE: Boolean = System.getProperty("in.gae.j") != null
/**
* The resource path segment corresponding to the system hostname.
*/
lazy val hostName: String = if (inGAE) "GAE" else InetAddress.getLocalHost.getHostName + "."
/**
* The list of paths to search for property file resources.
* Properties files may be found at either the classpath root or
* in /props
*/
lazy val toTry: List[() => String] = List(
() => "/props/" + modeName + userName + hostName,
() => "/props/" + modeName + userName,
() => "/props/" + modeName + hostName,
() => "/props/" + modeName + "default.",
() => "/" + modeName + userName + hostName,
() => "/" + modeName + userName,
() => "/" + modeName + hostName,
() => "/" + modeName + "default.")
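  // Illustration with made-up values (not from the original source): in production mode,
  // for user "jdoe" on host "web01", the resources tried (each with "props" appended) are
  //   /props/production.jdoe.web01., /props/production.jdoe., /props/production.web01.,
  //   /props/production.default., /production.jdoe.web01., /production.jdoe.,
  //   /production.web01. and /production.default.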
/**
* The map of key/value pairs retrieved from the property file.
*/
lazy val props = {
// find the first property file that is available
first(toTry)(f => tryo(getClass.getResourceAsStream(f()+"props")).filter(_ ne null)).map{s => val ret = new Properties; ret.load(s); ret} match {
      // if we've got a property file, create name/value pairs and turn them into a Map
case Full(prop) =>
Map(prop.entrySet.toArray.map{
s2 =>
val s = s2.asInstanceOf[_root_.java.util.Map.Entry[String, String]]
(s.getKey,s.getValue)
} :_*)
case _ => Map.empty[String, String] // if none, it's an empty map
}
}
}
| beni55/liftweb | lift-util/src/main/scala/net/liftweb/util/Props.scala | Scala | apache-2.0 | 6,091 |
// Equites, a Scala chess playground
// Copyright © 2013-2014 Frank S. Thomas <[email protected]>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eu.timepit.equites
package implicits
object PlacedImplicits {
implicit def unwrapPlaced[A](placed: Placed[A]): A = placed.elem
}
| equites-chess/equites-core | src/main/scala/eu/timepit/equites/implicits/PlacedImplicits.scala | Scala | gpl-3.0 | 888 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.integration.torch
import com.intel.analytics.bigdl.dllib.nn.SoftMin
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.Engine
import scala.util.Random
@com.intel.analytics.bigdl.tags.Serial
class SoftMinSpec extends TorchSpec {
"A SoftMin 1D input" should "generate correct output and grad" in {
torchCheck()
val layer = new SoftMin[Double]()
val input = Tensor[Double](10)
input.apply1(_ => Random.nextDouble())
val gradOutput = Tensor[Double](10)
gradOutput.apply1(_ => Random.nextDouble())
val start = System.nanoTime()
val output = layer.forward(input)
val gradInput = layer.backward(input, gradOutput)
val end = System.nanoTime()
val scalaTime = end - start
val code = "module = nn.SoftMin()\n" +
"output = module:forward(input)\n" +
"gradInput = module:backward(input,gradOutput)"
val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "gradOutput" -> gradOutput),
Array("output", "gradInput"))
val luaOutput = torchResult("output").asInstanceOf[Tensor[Double]]
val luaGradInput = torchResult("gradInput").asInstanceOf[Tensor[Double]]
output should be (luaOutput)
gradInput should be (luaGradInput)
println("Test case : SoftMin, Torch : " + luaTime + " s, Scala : " + scalaTime / 1e9 + " s")
}
"A SoftMin 2D input" should "generate correct output and grad" in {
torchCheck()
val layer = new SoftMin[Double]()
val input = Tensor[Double](3, 5)
input.apply1(_ => Random.nextDouble())
val gradOutput = Tensor[Double](3, 5)
gradOutput.apply1(_ => Random.nextDouble())
val start = System.nanoTime()
val output = layer.forward(input)
val gradInput = layer.backward(input, gradOutput)
val end = System.nanoTime()
val scalaTime = end - start
val code = "module = nn.SoftMin()\n" +
"output = module:forward(input)\n" +
"gradInput = module:backward(input,gradOutput)"
val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "gradOutput" -> gradOutput),
Array("output", "gradInput"))
val luaOutput = torchResult("output").asInstanceOf[Tensor[Double]]
val luaGradInput = torchResult("gradInput").asInstanceOf[Tensor[Double]]
output should be (luaOutput)
gradInput should be (luaGradInput)
println("Test case : SoftMin, Torch : " + luaTime + " s, Scala : " + scalaTime / 1e9 + " s")
}
"A SoftMin 3D input" should "generate correct output and grad" in {
torchCheck()
val layer = new SoftMin[Double]()
val input = Tensor[Double](4, 6, 6)
input.apply1(_ => Random.nextDouble())
val gradOutput = Tensor[Double](4, 6, 6)
gradOutput.apply1(_ => Random.nextDouble())
val start = System.nanoTime()
val output = layer.forward(input)
val gradInput = layer.backward(input, gradOutput)
val end = System.nanoTime()
val scalaTime = end - start
val code = "module = nn.SoftMin()\n" +
"output = module:forward(input)\n" +
"gradInput = module:backward(input,gradOutput)"
val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "gradOutput" -> gradOutput),
Array("output", "gradInput"))
val luaOutput = torchResult("output").asInstanceOf[Tensor[Double]]
val luaGradInput = torchResult("gradInput").asInstanceOf[Tensor[Double]]
output should be (luaOutput)
gradInput should be (luaGradInput)
println("Test case : SoftMin, Torch : " + luaTime + " s, Scala : " + scalaTime / 1e9 + " s")
}
"A SoftMin 4D input" should "generate correct output and grad" in {
torchCheck()
val layer = new SoftMin[Double]()
val input = Tensor[Double](3, 5, 6, 6)
input.apply1(_ => Random.nextDouble())
val gradOutput = Tensor[Double](3, 5, 6, 6)
gradOutput.apply1(_ => Random.nextDouble())
val start = System.nanoTime()
val output = layer.forward(input)
val gradInput = layer.backward(input, gradOutput)
val end = System.nanoTime()
val scalaTime = end - start
val code = "module = nn.SoftMin()\n" +
"output = module:forward(input)\n" +
"gradInput = module:backward(input,gradOutput)"
val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "gradOutput" -> gradOutput),
Array("output", "gradInput"))
val luaOutput = torchResult("output").asInstanceOf[Tensor[Double]]
val luaGradInput = torchResult("gradInput").asInstanceOf[Tensor[Double]]
output should be (luaOutput)
gradInput should be (luaGradInput)
println("Test case : SoftMin, Torch : " + luaTime + " s, Scala : " + scalaTime / 1e9 + " s")
}
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/integration/torch/SoftMinSpec.scala | Scala | apache-2.0 | 5,278 |
package lila.search
import com.sksamuel.elastic4s.ElasticDsl._
final class Range[A] private (val a: Option[A], val b: Option[A]) {
def queries(name: String) =
a.fold(b.toList map { bb => rangeQuery(name) lte bb.toString }) { aa =>
b.fold(List(rangeQuery(name) gte aa.toString)) { bb =>
List(rangeQuery(name) gte aa.toString lte bb.toString)
}
}
def map[B](f: A => B) = new Range(a map f, b map f)
def nonEmpty = a.nonEmpty || b.nonEmpty
}
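// Hypothetical illustration, not part of the original source: Range(Some(1200), Some(1800))
// over a field named "rating" yields the single bounded query
//   rangeQuery("rating") gte "1200" lte "1800"
// while Range(None, Some(1800)) yields only the lte clause.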
object Range {
import play.api.libs.json._
import play.api.libs.functional.syntax._
implicit def rangeJsonReader[A: Reads: Ordering]: Reads[Range[A]] =
(
      (__ \ "a").readNullable[A] and
        (__ \ "b").readNullable[A]
) { (a, b) => Range(a, b) }
def apply[A](a: Option[A], b: Option[A])(implicit o: Ordering[A]): Range[A] =
(a, b) match {
case (Some(aa), Some(bb)) =>
o.lt(aa, bb)
.fold(
new Range(a, b),
new Range(b, a)
)
case (x, y) => new Range(x, y)
}
def none[A]: Range[A] = new Range(None, None)
}
| ornicar/lila-search | app/Range.scala | Scala | agpl-3.0 | 1,085 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.logical
import org.apache.flink.table.plan.nodes.FlinkConventions
import org.apache.calcite.plan._
import org.apache.calcite.rel.convert.ConverterRule
import org.apache.calcite.rel.core.Snapshot
import org.apache.calcite.rel.logical.LogicalSnapshot
import org.apache.calcite.rel.metadata.{RelMdCollation, RelMetadataQuery}
import org.apache.calcite.rel.{RelCollation, RelCollationTraitDef, RelNode}
import org.apache.calcite.rex.RexNode
import java.util
import java.util.function.Supplier
/**
* Sub-class of [[Snapshot]] that is a relational expression which returns
* the contents of a relation expression as it was at a given time in the past.
*/
class FlinkLogicalSnapshot(
cluster: RelOptCluster,
traits: RelTraitSet,
child: RelNode,
period: RexNode)
extends Snapshot(cluster, traits, child, period)
with FlinkLogicalRel {
override def copy(
traitSet: RelTraitSet,
input: RelNode,
period: RexNode): Snapshot = {
new FlinkLogicalSnapshot(cluster, traitSet, input, period)
}
override def computeSelfCost(planner: RelOptPlanner, mq: RelMetadataQuery): RelOptCost = {
val rowCnt = mq.getRowCount(this)
val rowSize = mq.getAverageRowSize(this)
planner.getCostFactory.makeCost(rowCnt, rowCnt, rowCnt * rowSize)
}
}
class FlinkLogicalSnapshotConverter
extends ConverterRule(
classOf[LogicalSnapshot],
Convention.NONE,
FlinkConventions.LOGICAL,
"FlinkLogicalSnapshotConverter") {
def convert(rel: RelNode): RelNode = {
val snapshot = rel.asInstanceOf[LogicalSnapshot]
val newInput = RelOptRule.convert(snapshot.getInput, FlinkConventions.LOGICAL)
FlinkLogicalSnapshot.create(newInput, snapshot.getPeriod)
}
}
object FlinkLogicalSnapshot {
val CONVERTER = new FlinkLogicalSnapshotConverter
def create(input: RelNode, period: RexNode): FlinkLogicalSnapshot = {
val cluster = input.getCluster
val mq = cluster.getMetadataQuery
val traitSet = cluster.traitSet.replace(Convention.NONE).replaceIfs(
RelCollationTraitDef.INSTANCE, new Supplier[util.List[RelCollation]]() {
def get: util.List[RelCollation] = RelMdCollation.snapshot(mq, input)
})
val snapshot = new FlinkLogicalSnapshot(cluster, traitSet, input, period)
val newTraitSet = snapshot.getTraitSet
.replace(FlinkConventions.LOGICAL).simplify()
snapshot.copy(newTraitSet, input, period).asInstanceOf[FlinkLogicalSnapshot]
}
}
| shaoxuan-wang/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/nodes/logical/FlinkLogicalSnapshot.scala | Scala | apache-2.0 | 3,287 |
package info.mornlight.oneopus.ui
import javafx.scene.layout.HBox
import javafx.scene.control.{TextField, Label}
/**
* Created by alfred on 11/8/13.
*/
class FilterBar extends HBox with ListerBar {
private val label = new Label("Command:")
private val input = new TextField
getChildren.addAll(label, input)
def focusInput() {
input.requestFocus()
}
}
| xiaodongw/oneopus | app/src/main/scala/info/mornlight/oneopus/ui/FilterBar.scala | Scala | apache-2.0 | 371 |
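A usage sketch for the FilterBar above — wiring it into a JavaFX layout and moving focus to its text field; the surrounding BorderPane and scene setup are assumptions, and it is assumed that ListerBar introduces no unimplemented members:

import javafx.scene.layout.BorderPane

// Hypothetical wiring inside an existing JavaFX initialisation block.
val rootPane = new BorderPane
val filterBar = new FilterBar
rootPane.setBottom(filterBar)
filterBar.focusInput()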
/*
*
* * Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*
*/
package play.api.libs.ws.ssl
object Protocols {
/**
* Protocols which are known to be insecure.
*/
val deprecatedProtocols = Set("SSL", "SSLv2Hello", "SSLv3")
val recommendedProtocols = Array("TLSv1.2", "TLSv1.1", "TLSv1")
// Use 1.2 as a default in 1.7, use 1.0 in 1.6
// https://docs.fedoraproject.org/en-US/Fedora_Security_Team//html/Defensive_Coding/sect-Defensive_Coding-TLS-Client-OpenJDK.html
def recommendedProtocol = foldVersion(run16 = "TLSv1", runHigher = "TLSv1.2")
}
| jyotikamboj/container | pf-framework/src/play-ws/src/main/scala/play/api/libs/ws/ssl/Protocols.scala | Scala | mit | 586 |
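A sketch of how the values in Protocols above could be applied to a JSSE engine — restricting the enabled protocols to the recommended list; the SSLContext setup around it is an assumption, not part of the library code shown:

import javax.net.ssl.SSLContext

// Hypothetical usage: enable only the recommended protocols on a fresh engine.
val engine = SSLContext.getDefault.createSSLEngine()
engine.setEnabledProtocols(
  engine.getSupportedProtocols.filter(Protocols.recommendedProtocols.contains))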
package jp.bracken.scalastro
/* Copyright (C) 2013 Chris Bracken
*
* This file is part of Scalastro.
*
* Scalastro is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scalastro is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scalastro. If not, see <http://www.gnu.org/licenses/>.
*/
import org.joda.time.chrono.ISOChronology
import org.joda.time.chrono.JulianChronology
import org.joda.time.DateTime
import org.joda.time.DateTimeZone
import org.joda.time.Duration
import org.scalatest._
class JulianDateTest extends FlatSpec with ShouldMatchers {
"JulianDate.fromInstant" should "return 0 for the epoch" in {
val t = new DateTime(-4713, 1, 1, 12, 0,
JulianChronology.getInstance(DateTimeZone.UTC))
JulianDate.fromInstant(t) should be (0.0)
}
it should "handle dates from Julian Chronology" in {
val t = new DateTime(400, 1, 1, 12, 0,
JulianChronology.getInstance(DateTimeZone.UTC))
JulianDate.fromInstant(t) should be (1867158.0)
}
it should "handle dates from Gregorian Chronology" in {
val t = new DateTime(2000, 1, 1, 12, 0, DateTimeZone.UTC)
JulianDate.fromInstant(t) should be (2451545.0)
}
it should "handle fractional days" in {
val t = new DateTime(2000, 1, 1, 2, 20, 15, 332, DateTimeZone.UTC)
JulianDate.fromInstant(t) should be (2451544.5973996758)
}
"JulianDate.toDateTime" should "return the epoch for 0" in {
val julianChron = JulianChronology.getInstance(DateTimeZone.UTC)
val epoch = new DateTime(-4713, 1, 1, 12, 0, julianChron)
val dateTime = JulianDate.toDateTime(0.0)
dateTime.toDateTime(julianChron) should be (epoch)
val isoChron = ISOChronology.getInstance(DateTimeZone.UTC)
dateTime.getChronology should be (isoChron)
}
it should "handle fractional days" in {
val t = new DateTime(2000, 1, 1, 2, 20, 15, 332, DateTimeZone.UTC)
JulianDate.toDateTime(2451544.5973996758) should be (t)
}
}
| cbracken/scalastro | src/test/scala/jp/bracken/scalastro/JulianDateTest.scala | Scala | gpl-3.0 | 2,386 |
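The JulianDate object exercised by the tests above is not included here; a minimal sketch consistent with the expected values, built on the fixed offset between the Unix epoch (1970-01-01T00:00:00Z) and Julian date 2440587.5 — the object name, constants and signatures are assumptions:

import org.joda.time.{DateTime, DateTimeZone, ReadableInstant}

object JulianDateSketch {
  private val UnixEpochJulianDate = 2440587.5 // JD of 1970-01-01T00:00:00Z
  private val MillisPerDay = 86400000.0

  // Days (including the fractional part) elapsed since the Julian epoch.
  def fromInstant(instant: ReadableInstant): Double =
    UnixEpochJulianDate + instant.getMillis / MillisPerDay

  // Inverse conversion, rounded to the nearest millisecond, in the ISO chronology.
  def toDateTime(julianDate: Double): DateTime =
    new DateTime(math.round((julianDate - UnixEpochJulianDate) * MillisPerDay),
      DateTimeZone.UTC)
}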
package com.github.jmccrae
import java.sql.{Connection, PreparedStatement, ResultSet}
import org.scalatest._
import org.scalamock.scalatest.MockFactory
class RDFSQLUtilsTest extends WordSpec with Matchers with MockFactory {
import sqlutils._
"A SQL string" when {
"without variables" should {
"produce a prepared statement" in {
val conn = mock[Connection]
withSession(conn) { implicit session =>
(conn.prepareStatement(_ : String)).expects("test")
sql"""test""" }}}
"with variable" should {
"produce a prepared statement" in {
val conn = mock[Connection]
val x = "foo"
val ps = mock[PreparedStatement]
withSession(conn) { implicit session =>
(conn.prepareStatement(_ : String)).expects("test ? test").returning(ps)
(ps.setString _).expects(1, "foo")
sql"""test $x test""" }}}
"without results" should {
"execute" in {
val conn = mock[Connection]
val ps = mock[PreparedStatement]
withSession(conn) { implicit session =>
(conn.prepareStatement(_ : String)).
expects("select count(*) from table").
returning(ps)
sql"""select count(*) from table""" }}}
"with results" should {
"return results" in {
val conn = mock[Connection]
val ps = mock[PreparedStatement]
val rs = mock[ResultSet]
withSession(conn) { implicit session =>
(conn.prepareStatement(_ : String)).
expects("select * from table").
returning(ps)
(ps.executeQuery _).
expects().
returning(rs)
(rs.next _).
expects().
returning(false)
sql"""select * from table""".as1[Int] }}}
"with ?" should {
"insert" in {
val conn = mock[Connection]
val ps = mock[PreparedStatement]
(conn.prepareStatement(_ : String)).
expects("insert into table values (?, ?)").
returning(ps)
(ps.setInt _).
expects(1, 10)
(ps.setString _).
expects(2, "foo")
(ps.execute _).
expects().
returning(true)
withSession(conn) { implicit session =>
sql"""insert into table values (?, ?)""".insert(10, "foo") }}}
"with ?" should {
"insert2" in {
val conn = mock[Connection]
val ps = mock[PreparedStatement]
(conn.prepareStatement(_ : String)).
expects("insert into table values (?, ?)").
returning(ps)
(ps.setInt _).
expects(1, 10)
(ps.setString _).
expects(2, "foo")
(ps.addBatch _).
expects()
(ps.executeBatch _).
expects().
returning(Array(1))
withSession(conn) { implicit session =>
val stat = sql"""insert into table values (?, ?)""".insert2[Int, String]
stat(10, "foo")
stat.execute }}}
}
}
| jmccrae/yuzu | scala/src/test/scala/yuzu/test_rdfsql.scala | Scala | apache-2.0 | 3,001 |
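The tests above pin down the behaviour of the sql string interpolator: each interpolated value becomes a positional bind parameter on a PreparedStatement. A stripped-down sketch of such an interpolator, assuming a Session wrapper around java.sql.Connection — the real sqlutils API (as1, insert, insert2) is richer than this:

import java.sql.{Connection, PreparedStatement}

object SqlInterpolationSketch {
  final class Session(val conn: Connection)

  def withSession[A](conn: Connection)(f: Session => A): A = f(new Session(conn))

  implicit final class SqlHelper(private val sc: StringContext) extends AnyVal {
    def sql(args: Any*)(implicit session: Session): PreparedStatement = {
      // "test $x test" becomes "test ? test": every splice point is a bind parameter.
      val ps = session.conn.prepareStatement(sc.parts.mkString("?"))
      args.zipWithIndex.foreach {
        case (s: String, i) => ps.setString(i + 1, s)
        case (n: Int, i)    => ps.setInt(i + 1, n)
        case (other, i)     => ps.setObject(i + 1, other)
      }
      ps
    }
  }
}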
package lila.tournament
import akka.stream.Materializer
import akka.stream.scaladsl._
import BSONHandlers._
import org.joda.time.DateTime
import reactivemongo.akkastream.{ cursorProducer, AkkaStreamCursor }
import reactivemongo.api.bson._
import reactivemongo.api.ReadPreference
import lila.db.dsl._
import lila.game.Game
import lila.user.User
final class PairingRepo(coll: Coll)(implicit ec: scala.concurrent.ExecutionContext, mat: Materializer) {
def selectTour(tourId: Tournament.ID) = $doc("tid" -> tourId)
def selectUser(userId: User.ID) = $doc("u" -> userId)
private def selectTourUser(tourId: Tournament.ID, userId: User.ID) =
$doc(
"tid" -> tourId,
"u" -> userId
)
private val selectPlaying = $doc("s" $lt chess.Status.Mate.id)
private val selectFinished = $doc("s" $gte chess.Status.Mate.id)
private val recentSort = $doc("d" -> -1)
private val chronoSort = $doc("d" -> 1)
def byId(id: Tournament.ID): Fu[Option[Pairing]] = coll.find($id(id)).one[Pairing]
private[tournament] def lastOpponents(
tourId: Tournament.ID,
userIds: Set[User.ID],
max: Int
): Fu[Pairing.LastOpponents] =
userIds.nonEmpty.?? {
val nbUsers = userIds.size
coll
.find(
selectTour(tourId) ++ $doc("u" $in userIds),
$doc("_id" -> false, "u" -> true).some
)
.sort(recentSort)
.batchSize(20)
.cursor[Bdoc]()
.documentSource(max)
.mapConcat(_.getAsOpt[List[User.ID]]("u").toList)
.scan(Map.empty[User.ID, User.ID]) {
case (acc, List(u1, u2)) =>
val b1 = userIds.contains(u1)
val b2 = !b1 || userIds.contains(u2)
val acc1 = if (!b1 || acc.contains(u1)) acc else acc.updated(u1, u2)
if (!b2 || acc.contains(u2)) acc1 else acc1.updated(u2, u1)
case (acc, _) => acc
}
.takeWhile(
r => r.sizeIs < nbUsers,
inclusive = true
)
.toMat(Sink.lastOption)(Keep.right)
.run()
.dmap(~_)
} dmap Pairing.LastOpponents.apply
def opponentsOf(tourId: Tournament.ID, userId: User.ID): Fu[Set[User.ID]] =
coll
.find(
selectTourUser(tourId, userId),
$doc("_id" -> false, "u" -> true).some
)
.cursor[Bdoc]()
.list()
.dmap {
_.view.flatMap { doc =>
~doc.getAsOpt[List[User.ID]]("u").find(userId !=)
}.toSet
}
def recentIdsByTourAndUserId(tourId: Tournament.ID, userId: User.ID, nb: Int): Fu[List[Tournament.ID]] =
coll
.find(
selectTourUser(tourId, userId),
$doc("_id" -> true).some
)
.sort(recentSort)
.cursor[Bdoc]()
.list(nb)
.dmap {
_.flatMap(_.getAsOpt[Game.ID]("_id"))
}
def playingByTourAndUserId(tourId: Tournament.ID, userId: User.ID): Fu[Option[Game.ID]] =
coll
.find(
selectTourUser(tourId, userId) ++ selectPlaying,
$doc("_id" -> true).some
)
.sort(recentSort)
.one[Bdoc]
.dmap {
_.flatMap(_.getAsOpt[Game.ID]("_id"))
}
def removeByTour(tourId: Tournament.ID) = coll.delete.one(selectTour(tourId)).void
private[tournament] def forfeitByTourAndUserId(tourId: Tournament.ID, userId: User.ID): Funit =
coll
.list[Pairing](selectTourUser(tourId, userId))
.flatMap {
_.withFilter(_ notLostBy userId).map { p =>
coll.update.one(
$id(p.id),
$set(
"w" -> p.colorOf(userId).map(_.black)
)
)
}.sequenceFu
}
.void
def count(tourId: Tournament.ID): Fu[Int] =
coll.countSel(selectTour(tourId))
private[tournament] def countByTourIdAndUserIds(tourId: Tournament.ID): Fu[Map[User.ID, Int]] = {
coll
.aggregateList(maxDocs = 10000, ReadPreference.secondaryPreferred) { framework =>
import framework._
Match(selectTour(tourId)) -> List(
Project($doc("u" -> true, "_id" -> false)),
UnwindField("u"),
GroupField("u")("nb" -> SumAll),
Sort(Descending("nb"))
)
}
.map {
_.view.flatMap { doc =>
doc.getAsOpt[User.ID]("_id") flatMap { uid =>
doc.int("nb") map { uid -> _ }
}
}.toMap
}
}
def removePlaying(tourId: Tournament.ID) = coll.delete.one(selectTour(tourId) ++ selectPlaying).void
def findPlaying(tourId: Tournament.ID, userId: User.ID): Fu[Option[Pairing]] =
coll.find(selectTourUser(tourId, userId) ++ selectPlaying).one[Pairing]
def isPlaying(tourId: Tournament.ID, userId: User.ID): Fu[Boolean] =
coll.exists(selectTourUser(tourId, userId) ++ selectPlaying)
private[tournament] def finishedByPlayerChronological(
tourId: Tournament.ID,
userId: User.ID
): Fu[Pairings] =
coll
.find(
selectTourUser(tourId, userId) ++ selectFinished
)
.sort(chronoSort)
.cursor[Pairing]()
.list()
def insert(pairing: Pairing) =
coll.insert.one {
pairingHandler.write(pairing) ++ $doc("d" -> DateTime.now)
}.void
def finish(g: lila.game.Game) =
if (g.aborted) coll.delete.one($id(g.id)).void
else
coll.update
.one(
$id(g.id),
$set(
"s" -> g.status.id,
"w" -> g.winnerColor.map(_.white),
"t" -> g.turns
)
)
.void
def setBerserk(pairing: Pairing, userId: User.ID) = {
if (pairing.user1 == userId) "b1".some
else if (pairing.user2 == userId) "b2".some
else none
} ?? { field =>
coll.update
.one(
$id(pairing.id),
$set(field -> true)
)
.void
}
def sortedCursor(
tournamentId: Tournament.ID,
userId: Option[User.ID],
batchSize: Int = 0,
readPreference: ReadPreference = ReadPreference.secondaryPreferred
): AkkaStreamCursor[Pairing] =
coll
.find(selectTour(tournamentId) ++ userId.??(selectUser))
.sort(recentSort)
.batchSize(batchSize)
.cursor[Pairing](readPreference)
private[tournament] def rawStats(tourId: Tournament.ID): Fu[List[Bdoc]] = {
coll.aggregateList(maxDocs = 3) { framework =>
import framework._
Match(selectTour(tourId)) -> List(
Project(
$doc(
"_id" -> false,
"w" -> true,
"t" -> true,
"b1" -> $doc("$cond" -> $arr("$b1", 1, 0)),
"b2" -> $doc("$cond" -> $arr("$b2", 1, 0))
)
),
GroupField("w")(
"games" -> SumAll,
"moves" -> SumField("t"),
"b1" -> SumField("b1"),
"b2" -> SumField("b2")
)
)
}
}
}
| luanlv/lila | modules/tournament/src/main/PairingRepo.scala | Scala | mit | 6,774 |
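sortedCursor is the repository's streaming entry point; a hedged usage sketch of a caller folding over a tournament's pairings without loading them all at once — the caller-side method name and batch size are assumptions:

import akka.stream.Materializer
import scala.concurrent.Future

// Hypothetical caller, e.g. an export or statistics job.
def streamedPairingCount(repo: PairingRepo, tourId: Tournament.ID)(
    implicit mat: Materializer): Future[Int] =
  repo
    .sortedCursor(tourId, userId = None, batchSize = 256)
    .documentSource()
    .runFold(0)((count, _) => count + 1)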
package org.workcraft.tasks
import scalaz.Scalaz
import scalaz.Scalaz._
import org.workcraft.scala.effects.IO._
import org.workcraft.scala.effects.IO
import Task._
object Test {
def step1 : Task[Unit, Nothing] = Task( tc => {
System.out.print("Step 1")
Range(0,30).foreach( x => {
Thread.sleep(100)
System.out.print (".")
tc.progressUpdate(0.4).unsafePerformIO
})
System.out.println
Right(())
}.pure[IO])
def step2 : Task[Unit, Nothing] = Task({
System.out.print("Step 2")
Range(0,30).foreach( x => {
Thread.sleep(100)
System.out.print (".")
})
System.out.println
Right(())
}.pure[IO])
def step3 : Task[Unit, Nothing] = Task({
System.out.print("Step 3")
Range(0,30).foreach( x => {
Thread.sleep(100)
System.out.print (".")
})
System.out.println
Right(())
}.pure[IO])
def progressUpdate (progress: Double) = {
System.out.println (progress)
}.pure[IO]
def statusUpdate (status: String) = {
System.out.println ("Now doing: " + status)
}.pure[IO]
def main(args: Array[String]) : Unit = {
var cancelled = false
val myComplexTask = for {
_ <- ioTask (statusUpdate ("step 1"));
x <- step1;
_ <- ioTask (statusUpdate ("step 2"));
y <- step2;
_ <- ioTask (statusUpdate ("step 3"));
z <- step3
} yield z
myComplexTask.runTask(TaskControl(cancelled.pure, Test.progressUpdate(_), _ => ioPure.pure {} )).unsafePerformIO
}
} | tuura/workcraft-2.2 | Tasks/src/main/scala/org/workcraft/tasks/Test.scala | Scala | gpl-3.0 | 1,544 |
import sbt._
import Keys._
object BuildSettings {
val ScalaVersion = "2.11.1"
val buildSettings = Defaults.coreDefaultSettings ++ Seq(
organization := "com.softwaremill.scalaval",
version := "0.2-SNAPSHOT",
scalaVersion := ScalaVersion,
crossScalaVersions := Seq("2.10.2", "2.11.1"),
// Sonatype OSS deployment
publishTo := {
val nexus = "https://oss.sonatype.org/"
if (version.value.trim.endsWith("SNAPSHOT"))
Some("snapshots" at nexus + "content/repositories/snapshots")
else
Some("releases" at nexus + "service/local/staging/deploy/maven2")
},
publishMavenStyle := true,
publishArtifact in Test := false,
pomIncludeRepository := { _ => false },
pomExtra :=
<scm>
<url>[email protected]:mostr/scalaval.git</url>
<connection>scm:git:[email protected]:mostr/scalaval.git</connection>
</scm>
<developers>
<developer>
<id>mostr</id>
<name>Michał Ostruszka</name>
<url>http://michalostruszka.pl</url>
</developer>
</developers>,
licenses := ("Apache2", new java.net.URL("http://www.apache.org/licenses/LICENSE-2.0.txt")) :: Nil,
homepage := Some(new java.net.URL("http://michalostruszka.pl"))
)
}
object ScalavalBuild extends Build {
import BuildSettings._
resolvers += "Sonatype snapshots" at "https://oss.sonatype.org/content/repositories/snapshots/"
lazy val scalaval: Project = Project(
"scalaval",
file("."),
settings = buildSettings ++ Seq(
libraryDependencies ++= Seq(
"org.scalatest" %% "scalatest" % "2.2.0" % "test"
)
)
)
}
| mostr/scalaval | project/Build.scala | Scala | apache-2.0 | 1,669 |
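Publishing with the configuration above also needs Sonatype credentials, which conventionally live outside the build definition; a hedged sketch of a global credentials file — the file path (~/.sbt/0.13/sonatype.sbt) and environment-variable names are assumptions:

// Hypothetical contents of ~/.sbt/0.13/sonatype.sbt
credentials += Credentials(
  "Sonatype Nexus Repository Manager",
  "oss.sonatype.org",
  sys.env.getOrElse("SONATYPE_USERNAME", ""),
  sys.env.getOrElse("SONATYPE_PASSWORD", ""))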