code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5-1M)
---|---|---|---|---|---
package example
import java.sql.Timestamp
import org.apache.spark.sql.test._
import org.scalacheck.Arbitrary.arbitrary
import org.scalacheck.Gen._
import org.scalacheck.Prop.{exists, forAll}
import org.scalatest.FunSuite
import org.scalatest.prop.Checkers._
/**
* Created by jianshuang on 3/11/15.
*/
class ExampleTest extends FunSuite {
import example.Example._
implicit def sqlc = TestSQLContext
test("randomDataset has correct #rows") {
val d = randomDataset(10)
// d.collect().foreach(println)
assert(d.collect().size === 10)
assert(randomDataset(0).collect().size === 0)
}
test("randomInt properties: from is inclusive, to is exclusive") {
val propInclusiveFrom = exists { (from: Int) =>
  randomInt(from, from + 10) == from &&
    randomInt(from, from + 1) == from &&
    randomInt(from, from) == from &&
    randomInt(from, from - 1) == from &&
    randomInt(from, from - 10) == from
}
val propExclusiveTo = forAll { (from: Int, to: Int) =>
if (from != to)
randomInt(from, to) != to
else
randomInt(from, to) == to
}
check(propInclusiveFrom)
check(propExclusiveTo)
}
val datasetGen =
for (
id <- arbitrary[String];
ts <- choose(0L, System.currentTimeMillis());
v <- arbitrary[Double];
tag <- choose(1, 4)
) yield RandomRow(id, new Timestamp(ts), v, tag, "")
test("tag range is in range") {
check(
forAll(datasetGen) { d =>
d.tag >= 1 && d.tag < 5
})
}
}
| anand-singh/spark-sample-project | src/test/scala/ExampleTest.scala | Scala | apache-2.0 | 1,522 |
import com.amazonaws.services.{dynamodbv2 => aws}
import org.scalatest._
import org.slf4j._
import scala.util.Try
class DynamoDBV2Spec extends FlatSpec with Matchers {
behavior of "DynamoDB"
val log = LoggerFactory.getLogger(this.getClass)
val cond = DynamoDBCondition
it should "provide cool APIs for Hash PK tables" in {
implicit val dynamoDB = DynamoDB.local()
val tableName = s"Companies_${System.currentTimeMillis}"
val createdTableMeta: TableMeta = dynamoDB.createTable(
name = tableName,
hashPK = "Id" -> AttributeType.String
)
log.info(s"Created Table: ${createdTableMeta}")
println(s"Waiting for DynamoDB table activation...")
var isTableActivated = false
while (!isTableActivated) {
dynamoDB.describe(createdTableMeta.table).map { meta =>
isTableActivated = meta.status == aws.model.TableStatus.ACTIVE
}
Thread.sleep(1000L)
print(".")
}
println("")
println(s"Created DynamoDB table has been activated.")
val companies: Table = dynamoDB.table(tableName).get
companies.put("Amazon", "url" -> "http://www.amazon.com/")
companies.put("Google", "url" -> "http://www.google.com/")
companies.put("Microsoft")
// get by primary key
val google: Option[Item] = companies.get("Google")
google.get.attributes.find(_.name == "url").get.value.s.get should equal("http://www.google.com/")
val nonExistant: Option[Item] = companies.get("I Don't Exist")
nonExistant.isDefined should not be true
// batch get
val batchedCompanies: Seq[Item] = companies.batchGet(List(("Id", "Google"), ("Id", "Microsoft")))
batchedCompanies.size should equal(2)
batchedCompanies.map(item => item.attributes.find(_.name == "Id").get.value.s.get.equals("Google")
|| item.attributes.find(_.name == "Id").get.value.s.get.equals("Microsoft")) should equal(Seq(true, true))
val batchedNonExistant: Seq[Item] = companies.batchGet(List(("Id", "I Don't Exist"), ("Id", "Neither Do I")))
batchedNonExistant.size should equal(0)
// scan
val foundCompanies: Seq[Item] = companies.scan(Seq("url" -> cond.isNotNull))
foundCompanies.size should equal(2)
val scanNonExistant: Seq[Item] = companies.scan(Seq("url" -> cond.eq("I Don't Exist")))
scanNonExistant.size should equal(0)
// putAttributes
companies.putAttributes("Microsoft", Seq("url" -> "http://www.microsoft.com"))
companies.get("Microsoft").get.attributes.find(_.name == "url").get.value.s.get should equal("http://www.microsoft.com")
companies.destroy()
}
it should "provide cool APIs for Hash/Range PK tables" in {
implicit val dynamoDB = DynamoDB.local()
val tableName = s"Members_${System.currentTimeMillis}"
val createdTableMeta: TableMeta = dynamoDB.createTable(
name = tableName,
hashPK = "Id" -> AttributeType.Number,
rangePK = "Country" -> AttributeType.String,
otherAttributes = Seq("Company" -> AttributeType.String),
indexes = Seq(
LocalSecondaryIndex(
name = "CompanyIndex",
keySchema = Seq(KeySchema("Id", KeyType.Hash), KeySchema("Company", KeyType.Range)),
projection = Projection(ProjectionType.Include, Seq("Company"))
)
)
)
log.info(s"Created Table: ${createdTableMeta}")
println(s"Waiting for DynamoDB table activation...")
var isTableActivated = false
while (!isTableActivated) {
dynamoDB.describe(createdTableMeta.table).map { meta =>
isTableActivated = meta.status == aws.model.TableStatus.ACTIVE
}
Thread.sleep(1000L)
print(".")
}
println("")
println(s"Created DynamoDB table has been activated.")
val members: Table = dynamoDB.table(tableName).get
members.put(1, "Japan", "Name" -> "Alice", "Age" -> 23, "Company" -> "Google")
members.put(2, "U.S.", "Name" -> "Bob", "Age" -> 36, "Company" -> "Google")
members.put(3, "Japan", "Name" -> "Chris", "Age" -> 29, "Company" -> "Amazon")
val nonExistant: Option[Item] = members.get(4, "U.K.")
nonExistant.isDefined should not be true
val googlers: Seq[Item] = members.scan(Seq("Company" -> cond.eq("Google")))
googlers.flatMap(_.attributes.find(_.name == "Name").map(_.value.s.get)) should equal(Seq("Bob", "Alice"))
val scanNonExistant: Seq[Item] = members.scan(Seq("Company" -> cond.eq("I Don't Exist")))
scanNonExistant.size should equal(0)
// putAttributes
members.putAttributes(3, "Japan", Seq("Company" -> "Microsoft"))
members.get(3, "Japan").get.attributes.find(_.name == "Company").get.value.s.get should equal("Microsoft")
val exp = DynamoDBExpectedAttributeValue
Try(dynamoDB.putConditional(tableName, "Id" -> 3, "Country" -> "Japan",
"Name" -> "Kris")(Seq("Age" -> exp.lt(29)))) should be a 'failure
Try(dynamoDB.putConditional(tableName, "Id" -> 3, "Country" -> "Japan",
"Name" -> "Kris")(Seq("Age" -> exp.lt(30)))) should be a 'success
members.destroy()
}
it should "convert maps to attribute values implicitly" in {
implicit val dynamoDB = DynamoDB.local()
val tableName = s"Members_${System.currentTimeMillis}"
val createdTableMeta: TableMeta = dynamoDB.createTable(
name = tableName,
hashPK = "Id" -> AttributeType.Number,
rangePK = "Country" -> AttributeType.String,
otherAttributes = Seq("Company" -> AttributeType.String),
indexes = Seq(
LocalSecondaryIndex(
name = "CompanyIndex",
keySchema = Seq(KeySchema("Id", KeyType.Hash), KeySchema("Company", KeyType.Range)),
projection = Projection(ProjectionType.Include, Seq("Company"))
)
)
)
log.info(s"Created Table: ${createdTableMeta}")
println(s"Waiting for DynamoDB table activation...")
var isTableActivated = false
while (!isTableActivated) {
dynamoDB.describe(createdTableMeta.table).map { meta =>
isTableActivated = meta.status == aws.model.TableStatus.ACTIVE
}
Thread.sleep(1000L)
print(".")
}
println("")
println(s"Created DynamoDB table has been activated.")
val members: Table = dynamoDB.table(tableName).get
members.put(1, "Japan", "Name" -> Map("foo" -> Map("bar" -> "brack")), "Age" -> 23, "Company" -> "Google")
members.get(1, "Japan").get.attributes.find(_.name == "Name").get.value.m.get.get("foo").getM().get("bar").getS() should equal("brack")
members.put(2, "Micronesia", "Name" -> Map("aliases" -> List("foo", "bar", "other")), "Age" -> 26, "Company" -> "Spotify")
members.get(2, "Micronesia").get.attributes.find(_.name == "Name").get.value.m.get.get("aliases").getSS() should contain allOf ("foo", "bar", "other")
}
it should "convert list of maps to attribute values implicitly" in {
implicit val dynamoDB = DynamoDB.local()
val tableName = s"Members_${System.currentTimeMillis}"
val createdTableMeta: TableMeta = dynamoDB.createTable(
name = tableName,
hashPK = "Id" -> AttributeType.Number,
rangePK = "Country" -> AttributeType.String,
otherAttributes = Seq("Company" -> AttributeType.String),
indexes = Seq(
LocalSecondaryIndex(
name = "CompanyIndex",
keySchema = Seq(KeySchema("Id", KeyType.Hash), KeySchema("Company", KeyType.Range)),
projection = Projection(ProjectionType.Include, Seq("Company"))
)
)
)
log.info(s"Created Table: ${createdTableMeta}")
println(s"Waiting for DynamoDB table activation...")
var isTableActivated = false
while (!isTableActivated) {
dynamoDB.describe(createdTableMeta.table).map { meta =>
isTableActivated = meta.status == aws.model.TableStatus.ACTIVE
}
Thread.sleep(1000L)
print(".")
}
println("")
println(s"Created DynamoDB table has been activated.")
val members: Table = dynamoDB.table(tableName).get
members.put(1, "Japan", "Name" -> List(Map("bar" -> "brack")), "Age" -> 23, "Company" -> "Google")
}
it should "provide cool APIs to use global secondary index" in {
implicit val dynamoDB = DynamoDB.local()
val tableName = s"Users_${System.currentTimeMillis}"
val globalSecondaryIndex = GlobalSecondaryIndex(
name = "SexIndex",
keySchema = Seq(KeySchema("Sex", KeyType.Hash), KeySchema("Age", KeyType.Range)),
projection = Projection(ProjectionType.All),
provisionedThroughput = ProvisionedThroughput(readCapacityUnits = 10, writeCapacityUnits = 10)
)
val table = Table(
name = tableName,
hashPK = "Id",
attributes = Seq(
AttributeDefinition("Id", AttributeType.Number),
AttributeDefinition("Sex", AttributeType.String),
AttributeDefinition("Age", AttributeType.Number)
),
globalSecondaryIndexes = Seq(globalSecondaryIndex)
)
val createdTableMeta: TableMeta = dynamoDB.createTable(table)
log.info(s"Created Table: ${createdTableMeta}")
println(s"Waiting for DynamoDB table activation...")
var isTableActivated = false
while (!isTableActivated) {
dynamoDB.describe(createdTableMeta.table).map { meta =>
isTableActivated = meta.status == aws.model.TableStatus.ACTIVE
}
Thread.sleep(1000L)
print(".")
}
println("")
println(s"Created DynamoDB table has been activated.")
val users: Table = dynamoDB.table(tableName).get
users.put(1, "Name" -> "John", "Sex" -> "Male", "Age" -> 12)
users.put(2, "Name" -> "Bob", "Sex" -> "Male", "Age" -> 14, "Friend" -> true)
users.put(3, "Name" -> "Chris", "Sex" -> "Female", "Age" -> 9)
users.put(4, "Name" -> "Michael", "Sex" -> "Male", "Age" -> 65)
val teenageBoys: Seq[Item] = users.queryWithIndex(
index = globalSecondaryIndex,
keyConditions = Seq("Sex" -> cond.eq("Male"), "Age" -> cond.lt(20)),
limit = 1, // to test that we still return 2 names
pageStatsCallback = println
)
teenageBoys.flatMap(_.attributes.find(_.name == "Name").map(_.value.s.get)) should equal(Seq("John", "Bob"))
teenageBoys.flatMap(_.attributes.find(_.name == "Friend")).map(_.value.bl.get) should equal(Seq(true))
users.destroy()
}
it should "support paging for table scans" in {
implicit val dynamoDB = DynamoDB.local()
val tableName = s"Cities_${System.currentTimeMillis}"
val createdTableMeta: TableMeta = dynamoDB.createTable(
name = tableName,
hashPK = "Id" -> AttributeType.Number,
rangePK = "Country" -> AttributeType.String,
otherAttributes = Seq(),
indexes = Seq()
)
log.info(s"Created Table: ${createdTableMeta}")
println(s"Waiting for DynamoDB table activation...")
var isTableActivated = false
while (!isTableActivated) {
dynamoDB.describe(createdTableMeta.table).map { meta =>
isTableActivated = meta.status == aws.model.TableStatus.ACTIVE
}
Thread.sleep(1000L)
print(".")
}
println("")
println(s"Created DynamoDB table has been activated.")
val cities: Table = dynamoDB.table(tableName).get
cities.put(1, "China", "Name" -> "Beijing", "Population" -> 21516000)
cities.put(2, "Egypt", "Name" -> "Cairo", "Population" -> 9278441)
cities.put(3, "India", "Name" -> "Delhi", "Population" -> 16787941)
cities.put(4, "China", "Name" -> "Guangzhou", "Population" -> 9865702)
cities.put(5, "Turkey", "Name" -> "Istanbul", "Population" -> 14657000)
cities.put(6, "Indonesia", "Name" -> "Jakarta", "Population" -> 10075310)
cities.put(7, "Pakistan", "Name" -> "Karachi", "Population" -> 21000000)
cities.put(8, "Democratic Republic of the Congo", "Name" -> "Kinshasa", "Population" -> 9735000)
cities.put(9, "Nigeria", "Name" -> "Lagos", "Population" -> 16060303)
cities.put(10, "Peru", "Name" -> "Lima", "Population" -> 8693387)
cities.put(11, "United Kingdom", "Name" -> "London", "Population" -> 8538689)
cities.put(12, "Mexico", "Name" -> "Mexico City", "Population" -> 8874724)
cities.put(13, "Russia", "Name" -> "Moscow", "Population" -> 12197596)
cities.put(14, "India", "Name" -> "Mumbai", "Population" -> 12478447)
cities.put(15, "United States", "Name" -> "New York", "Population" -> 8491079)
cities.put(16, "South Korea", "Name" -> "Seoul", "Population" -> 10048593)
cities.put(17, "China", "Name" -> "Shanghai", "Population" -> 24256800)
cities.put(18, "China", "Name" -> "Shenzhen", "Population" -> 10780000)
cities.put(19, "Brazil", "Name" -> "São Paulo", "Population" -> 21292893)
cities.put(20, "Japan", "Name" -> "Tokyo", "Population" -> 13297629)
// set up a closure with page stats, and a way to reset and add to that
var pages = 0
var scanned = 0
var found = 0
def resetCounts = {
pages = 0
scanned = 0
found = 0
}
def addPageCounts(page: PageStats) = {
pages += 1
scanned += page.scanned
found += page.items
}
// a limit of 2, with 20 items, will divide into 10 pages
// (and need 11 page fetches since DynamoDB needs to fetch an additional page to find out there was no more data)
// a filter of population > 20M should return 4/20 cities, so at least 7 pages will have no matching results
val huge1: Seq[Item] = cities.scan(Seq("Population" -> cond.gt(20000000)), limit = 2, pageStatsCallback = addPageCounts)
huge1.flatMap(_.attributes.find(_.name == "Name").map(_.value.s.get)) should contain only ("Karachi", "Beijing", "São Paulo", "Shanghai")
pages should be(11)
scanned should be(20)
found should be(4)
resetCounts
// a limit of 3, with 20 items, will divide into 7 pages
// (and need 7 page fetches as the last page is partial so DynamoDB can tell it's done)
// a filter of population > 20M should return 4/20 cities, so at least 3 pages will have no matching results
val huge2: Seq[Item] = cities.scan(Seq("Population" -> cond.gt(20000000)), limit = 3, pageStatsCallback = addPageCounts)
huge2.flatMap(_.attributes.find(_.name == "Name").map(_.value.s.get)) should contain only ("Beijing", "Karachi", "Shanghai", "São Paulo")
pages should be(7)
scanned should be(20)
found should be(4)
resetCounts
// a filter of population > 2 should return 20/20 cities, and a limit of 101 gives all results on a single page
val all1: Seq[Item] = cities.scan(Seq("Population" -> cond.gt(2)), limit = 101, pageStatsCallback = addPageCounts)
all1.size should be(20)
pages should be(1)
scanned should be(20)
found should be(20)
resetCounts
// but if you only take a few items from the sequence, it shouldn't fetch more pages than needed
val all1b: Seq[Item] = cities.scan(Seq("Population" -> cond.gt(2)), limit = 3, pageStatsCallback = addPageCounts)
val List(first, second) = all1b.take(2).flatMap(_.attributes.find(_.name == "Name").map(_.value.s.get)).toList
pages should be(1) // it should only fetch a single page
scanned should be(3) // but it would scan the entire page,
found should be(3) // and find every match on the page, even though we just asked for one (from that page)
resetCounts
// a filter of population > 2 should return 20/20 cities, and a limit of 11 gives two pages with results on both
val all2: Seq[Item] = cities.scan(Seq("Population" -> cond.gt(2)), limit = 11, pageStatsCallback = addPageCounts)
all2.size should be(20)
pages should be(2)
scanned should be(20)
found should be(20)
resetCounts
// the same query should work fine without a callback
val all3: Seq[Item] = cities.scan(Seq("Population" -> cond.gt(2)), limit = 11)
all3.size should be(20)
cities.destroy()
}
it should "support paging for table queries" in {
implicit val dynamoDB = DynamoDB.local()
val tableName = s"Cities_${System.currentTimeMillis}"
val createdTableMeta: TableMeta = dynamoDB.createTable(
name = tableName,
hashPK = "Country" -> AttributeType.String,
rangePK = "Population" -> AttributeType.Number,
otherAttributes = Seq(),
indexes = Seq()
)
log.info(s"Created Table: ${createdTableMeta}")
println(s"Waiting for DynamoDB table activation...")
var isTableActivated = false
while (!isTableActivated) {
dynamoDB.describe(createdTableMeta.table).map { meta =>
isTableActivated = meta.status == aws.model.TableStatus.ACTIVE
}
Thread.sleep(1000L)
print(".")
}
println("")
println(s"Created DynamoDB table has been activated.")
val cities: Table = dynamoDB.table(tableName).get
cities.put("China", 21516000, "Name" -> "Beijing")
cities.put("Egypt", 9278441, "Name" -> "Cairo")
cities.put("India", 16787941, "Name" -> "Delhi")
cities.put("China", 9865702, "Name" -> "Guangzhou")
cities.put("Turkey", 14657000, "Name" -> "Istanbul")
cities.put("Indonesia", 10075310, "Name" -> "Jakarta")
cities.put("Pakistan", 21000000, "Name" -> "Karachi")
cities.put("Democratic Republic of the Congo", 9735000, "Name" -> "Kinshasa")
cities.put("Nigeria", 16060303, "Name" -> "Lagos")
cities.put("Peru", 8693387, "Name" -> "Lima")
cities.put("United Kingdom", 8538689, "Name" -> "London")
cities.put("Mexico", 8874724, "Name" -> "Mexico City")
cities.put("Russia", 12197596, "Name" -> "Moscow")
cities.put("India", 12478447, "Name" -> "Mumbai")
cities.put("United States", 8491079, "Name" -> "New York")
cities.put("South Korea", 10048593, "Name" -> "Seoul")
cities.put("China", 24256800, "Name" -> "Shanghai")
cities.put("China", 10780000, "Name" -> "Shenzhen")
cities.put("Brazil", 21292893, "Name" -> "São Paulo")
cities.put("Japan", 13297629, "Name" -> "Tokyo")
// set up a closure with page stats, and a way to reset and add to that
var pages = 0
var scanned = 0
var found = 0
def resetCounts = {
pages = 0
scanned = 0
found = 0
}
def addPageCounts(page: PageStats) = {
pages += 1
scanned += page.scanned
found += page.items
}
// a limit of 1, with 2 matching Chinese cities, will divide into 2 pages
// (and need 3 page fetches since DynamoDB needs to fetch an additional page to find out there was no more data)
// a filter of population > 20M should return 2 matching Chinese cities
val hugeChinese1: Seq[Item] = cities.query(Seq("Country" -> cond.eq("China"), "Population" -> cond.gt(20000000)), limit = 1, pageStatsCallback = addPageCounts)
hugeChinese1.flatMap(_.attributes.find(_.name == "Name").map(_.value.s.get)) should contain only ("Beijing", "Shanghai")
pages should be(3)
scanned should be(2)
found should be(2)
resetCounts
// a limit of 1 will divide the 2 matching Chinese cities into 2 pages
// 5 pages will be fetched - one for each Chinese city, plus one for DynamoDB to find out there is no more data
val chineseStartingWithS: Seq[Item] = cities.filteringQuery(
Seq("Country" -> cond.eq("China")), Seq("Name" -> cond.beginsWith("S")),
limit = 1, pageStatsCallback = addPageCounts
)
chineseStartingWithS.flatMap(_.attributes.find(_.name == "Name").map(_.value.s.get)) should contain only ("Shenzhen", "Shanghai")
pages should be(5)
scanned should be(4)
found should be(2)
resetCounts
// a limit of 2, with 3 matching Chinese cities, will divide into 2 pages
// (and need 2 page fetches as the last page is partial so DynamoDB can tell it's done)
// a filter of population > 10M should return 3 matching Chinese cities
val hugeChinese2: Seq[Item] = cities.query(Seq("Country" -> cond.eq("China"), "Population" -> cond.gt(10000000)), limit = 2, pageStatsCallback = addPageCounts)
hugeChinese2.flatMap(_.attributes.find(_.name == "Name").map(_.value.s.get)) should contain only ("Shanghai", "Shenzhen", "Beijing")
pages should be(2)
scanned should be(3)
found should be(3)
resetCounts
// but if you only take a few items from the sequence, it shouldn't fetch more pages than needed
val hugeChinese2b: Seq[Item] = cities.query(Seq("Country" -> cond.eq("China"), "Population" -> cond.gt(10000000)), limit = 2, pageStatsCallback = addPageCounts)
hugeChinese2b.take(1).flatMap(_.attributes.find(_.name == "Name").map(_.value.s.get)) should contain oneOf ("Shanghai", "Shenzhen", "Beijing")
pages should be(1) // it should only fetch a single page
scanned should be(2) // but it would scan the entire page,
found should be(2) // and find every match on the page, even though we just asked for one (from that page)
resetCounts
// a filter of population > 2 should return 4 matching Chinese cities, and a limit of 11 gives all results on a single page
val allChinese1: Seq[Item] = cities.query(Seq("Country" -> cond.eq("China"), "Population" -> cond.gt(2)), limit = 11, pageStatsCallback = addPageCounts)
allChinese1.size should be(4)
pages should be(1)
scanned should be(4)
found should be(4)
resetCounts
// the same query should work fine without a callback
val allChinese2: Seq[Item] = cities.query(Seq("Country" -> cond.eq("China"), "Population" -> cond.gt(2)), limit = 11)
allChinese2.size should be(4)
cities.destroy()
}
it should "support count operations for table queries and scans" in {
implicit val dynamoDB = DynamoDB.local()
val tableName = s"Cities_${System.currentTimeMillis}"
val createdTableMeta: TableMeta = dynamoDB.createTable(
name = tableName,
hashPK = "Country" -> AttributeType.String,
rangePK = "Population" -> AttributeType.Number,
otherAttributes = Seq(),
indexes = Seq()
)
log.info(s"Created Table: ${createdTableMeta}")
println(s"Waiting for DynamoDB table activation...")
var isTableActivated = false
while (!isTableActivated) {
dynamoDB.describe(createdTableMeta.table).map { meta =>
isTableActivated = meta.status == aws.model.TableStatus.ACTIVE
}
Thread.sleep(1000L)
print(".")
}
println("")
println(s"Created DynamoDB table has been activated.")
val cities: Table = dynamoDB.table(tableName).get
cities.put("China", 21516000, "Name" -> "Beijing")
cities.put("Egypt", 9278441, "Name" -> "Cairo")
cities.put("India", 16787941, "Name" -> "Delhi")
cities.put("China", 9865702, "Name" -> "Guangzhou")
cities.put("Turkey", 14657000, "Name" -> "Istanbul")
cities.put("Indonesia", 10075310, "Name" -> "Jakarta")
cities.put("Pakistan", 21000000, "Name" -> "Karachi")
cities.put("Democratic Republic of the Congo", 9735000, "Name" -> "Kinshasa")
cities.put("Nigeria", 16060303, "Name" -> "Lagos")
cities.put("Peru", 8693387, "Name" -> "Lima")
cities.put("United Kingdom", 8538689, "Name" -> "London")
cities.put("Mexico", 8874724, "Name" -> "Mexico City")
cities.put("Russia", 12197596, "Name" -> "Moscow")
cities.put("India", 12478447, "Name" -> "Mumbai")
cities.put("United States", 8491079, "Name" -> "New York")
cities.put("South Korea", 10048593, "Name" -> "Seoul")
cities.put("China", 24256800, "Name" -> "Shanghai")
cities.put("China", 10780000, "Name" -> "Shenzhen")
cities.put("Brazil", 21292893, "Name" -> "São Paulo")
cities.put("Japan", 13297629, "Name" -> "Tokyo")
val queryNbrChineseCities = cities.query(Seq("Country" -> cond.eq("China")), aws.model.Select.COUNT)
queryNbrChineseCities.map(item => item.attributes.find(_.name == "Count").get.value.n.get.toInt).head should be(4)
val queryNbrIndianCities = cities.query(Seq("Country" -> cond.eq("India")), aws.model.Select.COUNT)
queryNbrIndianCities.map(item => item.attributes.find(_.name == "Count").get.value.n.get.toInt).head should be(2)
val queryZeroCounts = cities.query(Seq("Country" -> cond.eq("Italy")), aws.model.Select.COUNT)
queryZeroCounts.map(item => item.attributes.find(_.name == "Count").get.value.n.get.toInt).head should be(0)
val scanAllCities = cities.scan(Seq("Population" -> cond.gt(2)), aws.model.Select.COUNT)
scanAllCities.map(item => item.attributes.find(_.name == "Count").get.value.n.get.toInt).head should be(20)
val scanNbrSmallCities = cities.scan(Seq("Population" -> cond.lt(10000000)), aws.model.Select.COUNT)
scanNbrSmallCities.map(item => item.attributes.find(_.name == "Count").get.value.n.get.toInt).head should be(7)
val scanZeroCounts = cities.scan(Seq("Population" -> cond.lt(0)), aws.model.Select.COUNT)
scanZeroCounts.map(item => item.attributes.find(_.name == "Count").get.value.n.get.toInt).head should be(0)
cities.destroy()
}
}
| hirokikonishi/awscala | aws/dynamo/src/test/scala/DynamoDBV2Spec.scala | Scala | apache-2.0 | 24,800 |
package net.combinatory.rtm
import org.scalatest.FunSuite
class TaskTests extends FunSuite {
test("tasks") {
//pending
//Http runMethod (Methods.taskGetList, Nil)
}
}
| comb/rtm-scala | src/test/scala/TaskTests.scala | Scala | apache-2.0 | 182 |
/**
* This file is part of the TA Buddy project.
* Copyright (c) 2012-2014 Alexey Aksenov [email protected]
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Affero General Global License version 3
* as published by the Free Software Foundation with the addition of the
* following permission added to Section 15 as permitted in Section 7(a):
* FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED
* BY Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS»,
* Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS» DISCLAIMS
* THE WARRANTY OF NON INFRINGEMENT OF THIRD PARTY RIGHTS.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Affero General Global License for more details.
* You should have received a copy of the GNU Affero General Global License
* along with this program; if not, see http://www.gnu.org/licenses or write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA, 02110-1301 USA, or download the license from the following URL:
* http://www.gnu.org/licenses/agpl.html
*
* The interactive user interfaces in modified source and object code versions
* of this program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU Affero General Global License.
*
* In accordance with Section 7(b) of the GNU Affero General Global License,
* you must retain the producer line in every report, form or document
* that is created or manipulated using TA Buddy.
*
* You can be released from the requirements of the license by purchasing
* a commercial license. Buying such a license is mandatory as soon as you
* develop commercial activities involving the TA Buddy software without
* disclosing the source code of your own applications.
* These activities include: offering paid services to customers,
* serving files in a web or/and network application,
* shipping TA Buddy with a closed source product.
*
* For more information, please contact Digimead Team at this
* address: [email protected]
*/
package org.digimead.tabuddy.desktop.logic.operation.api
import org.digimead.tabuddy.desktop.core.definition.api.XOperation
import org.digimead.tabuddy.desktop.logic.payload.api.XElementTemplate
import org.digimead.tabuddy.model.Model
import org.digimead.tabuddy.model.graph.Graph
/**
* OperationModifyElementTemplateList base trait.
*/
trait XOperationModifyElementTemplateList[T <: XElementTemplate.Generic] {
checkSubclass()
/**
* Modify an element template list.
*
* @param graph graph that contains a template list
* @param templateList exists templates
* @return the modified element template list
*/
def apply(graph: Graph[_ <: Model.Like], templateList: Set[T]): Set[T]
/**
* Create 'Modify an element template list' operation.
*
* @param graph graph that contains a template list
* @param templateList exists templates
* @return 'Modify an element template list' operation
*/
def operation(graph: Graph[_ <: Model.Like], templateList: Set[T]): XOperation[Set[T]]
/**
* Checks that this class can be subclassed.
* <p>
* The API class is intended to be subclassed only at specific,
* controlled point. This method enforces this rule
* unless it is overridden.
* </p><p>
* <em>IMPORTANT:</em> By providing an implementation of this
* method that allows a subclass of a class which does not
* normally allow subclassing to be created, the implementer
* agrees to be fully responsible for the fact that any such
* subclass will likely fail.
* </p>
*/
protected def checkSubclass(): Unit =
throw new IllegalAccessException("Please, use org.digimead.tabuddy.desktop.logic.operation.OperationModifyElementTemplateList instead.")
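  // Hedged illustration (added editorially, not in the original source): the sanctioned
  // implementation named in the message above is expected to disable this guard, e.g. with
  // `override protected def checkSubclass(): Unit = ()`, so that only that controlled
  // subclass can be instantiated without this exception being thrown.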
}
| digimead/digi-TABuddy-desktop | part-logic/src/main/scala/org/digimead/tabuddy/desktop/logic/operation/api/XOperationModifyElementTemplateList.scala | Scala | agpl-3.0 | 3,977 |
/* __ __ *\
* / /____ ___ ____ ___ ___ _/ / lasius *
* / __/ -_) _ `/ _ \/ _ \/ _ `/ / contributed by tegonal *
* \__/\__/\_, /\___/_//_/\_,_/_/ http://tegonal.com/ *
* /___/ *
* *
* This program is free software: you can redistribute it and/or modify it *
* under the terms of the GNU General Public License as published by *
* the Free Software Foundation, either version 3 of the License, *
* or (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, but *
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY *
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for *
* more details. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program. If not, see http://www.gnu.org/licenses/ *
* *
\* */
package controllers
import core.{DefaultCacheAware, DefaultSystemServicesAware, SystemServicesAware}
import domain.views.LatestUserTimeBookingsView._
import models.FreeUser
import play.api.mvc.Controller
import scala.concurrent.Future
class LatestUserTimeBookingsController {
self: Controller with Security with SystemServicesAware =>
def getLatestTimeBooking(maxHistory: Int) = HasRole(FreeUser, parse.empty) {
implicit subject =>
implicit request => {
systemServices.latestUserTimeBookingsViewService ! GetLatestTimeBooking(subject.userId, maxHistory)
Future.successful(Ok)
}
}
}
object LatestUserTimeBookingsController extends LatestUserTimeBookingsController with Controller with Security with DefaultSecurityComponent with DefaultCacheAware with DefaultSystemServicesAware
| tegonal/lasius | app/controllers/LatestUserTimeBookingsController.scala | Scala | gpl-3.0 | 2,405 |
package org.example1_1.usage
import org.example1_1.declaration.{X, X4, X5, X6}
trait Usage3 {
val x: X = ???
val x4: X4 = ???
val x5: X5 = ???
val x6: X6 = ???
} | JetBrains/intellij-scala | scala/scala-impl/testdata/move/allInOne_1_MoveXYZ/before/org/example1_1/usage/Usage3.scala | Scala | apache-2.0 | 171 |
package io.neilord
import akka.actor.Props
import scala.reflect._
trait PropsProvider {
def getProps(args: Any*): Props
}
trait RealPropsProvider extends PropsProvider {
val ctag: ClassTag[_]
override def getProps(args: Any*) = {
Props(ctag.runtimeClass, args: _*)
}
}
| NoOrdInaryGuy/akka-cereal | src/main/scala/io/neilord/PropsProvider.scala | Scala | apache-2.0 | 284 |
/*
* Copyright 2016 Carlo Micieli
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.hascalator
package typeclasses
import Prelude._
/** @author Carlo Micieli
* @since 0.0.1
*/
sealed trait Ordering
object Ordering {
case object EQ extends Ordering
case object LT extends Ordering
case object GT extends Ordering
def apply[A](cmp: (A, A) => Int): (A, A) => Ordering = {
(x, y) =>
{
val i = cmp(x, y)
i match {
case 0 => EQ
case n if n < 0 => LT
case n if n > 0 => GT
}
}
}
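  // Editorial usage sketch (not in the original file); `cmpInt` is a hypothetical name:
  //   val cmpInt: (Int, Int) => Ordering = Ordering[Int]((x, y) => x.compareTo(y))
  //   cmpInt(1, 2) // => LT
  //   cmpInt(2, 2) // => EQ
  //   cmpInt(3, 2) // => GT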
}
| CarloMicieli/hascalator | core/src/main/scala/io/hascalator/typeclasses/Ordering.scala | Scala | apache-2.0 | 1,095 |
package frameless
import org.scalacheck.Prop
import org.scalacheck.Prop._
class AsTests extends TypedDatasetSuite {
test("as[X2[A, B]]") {
def prop[A, B](data: Vector[(A, B)])(
implicit
eab: TypedEncoder[(A, B)],
ex2: TypedEncoder[X2[A, B]]
): Prop = {
val dataset = TypedDataset.create(data)
val dataset2 = dataset.as[X2[A,B]]().collect().run().toVector
val data2 = data.map { case (a, b) => X2(a, b) }
dataset2 ?= data2
}
check(forAll(prop[Int, Int] _))
check(forAll(prop[String, String] _))
check(forAll(prop[String, Int] _))
check(forAll(prop[Long, Int] _))
check(forAll(prop[Seq[Seq[Option[Seq[Long]]]], Seq[Int]] _))
check(forAll(prop[Seq[Option[Seq[String]]], Seq[Int]] _))
}
test("as[X2[X2[A, B], C]") {
def prop[A, B, C](data: Vector[(A, B, C)])(
implicit
eab: TypedEncoder[((A, B), C)],
ex2: TypedEncoder[X2[X2[A, B], C]]
): Prop = {
val data2 = data.map {
case (a, b, c) => ((a, b), c)
}
val dataset = TypedDataset.create(data2)
val dataset2 = dataset.as[X2[X2[A,B], C]]().collect().run().toVector
val data3 = data2.map { case ((a, b), c) => X2(X2(a, b), c) }
dataset2 ?= data3
}
check(forAll(prop[String, Int, Int] _))
check(forAll(prop[String, Int, String] _))
check(forAll(prop[String, String, Int] _))
check(forAll(prop[Long, Int, String] _))
check(forAll(prop[Seq[Seq[Option[Seq[Long]]]], Seq[Int], Option[Seq[Option[Int]]]] _))
check(forAll(prop[Seq[Option[Seq[String]]], Seq[Int], Seq[Option[String]]] _))
}
}
| adelbertc/frameless | dataset/src/test/scala/frameless/AsTests.scala | Scala | apache-2.0 | 1,621 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package statements
import com.intellij.psi.PsiClass
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScTypeParam
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypeParametersOwner
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScClass, ScObject, ScTrait}
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api.TypeParameterType
import org.jetbrains.plugins.scala.lang.psi.types.result.{Failure, TypeResult, TypingContext}
import org.jetbrains.plugins.scala.macroAnnotations.{CachedInsidePsiElement, ModCount}
/**
* @author Alexander Podkhalyuzin
* Date: 22.02.2008
*/
trait ScTypeAliasDefinition extends ScTypeAlias {
override def isDefinition: Boolean = true
def aliasedTypeElement: Option[ScTypeElement]
def aliasedType(ctx: TypingContext = TypingContext.empty): TypeResult[ScType] = {
if (ctx.visited.contains(this)) {
new Failure(ScalaBundle.message("circular.dependency.detected", name), Some(this)) {
override def isCyclic = true
}
} else {
aliasedTypeElement.map {
_.getType(ctx(this))
}.getOrElse(Failure("No alias type", Some(this)))
}
}
@CachedInsidePsiElement(this, ModCount.getBlockModificationCount)
def aliasedType: TypeResult[ScType] = aliasedType()
def lowerBound: TypeResult[ScType] = aliasedType()
def upperBound: TypeResult[ScType] = aliasedType()
def isExactAliasFor(cls: PsiClass): Boolean = {
val isDefinedInObject = containingClass match {
case obj: ScObject if obj.isStatic => true
case _ => false
}
isDefinedInObject && isAliasFor(cls)
}
def isAliasFor(cls: PsiClass): Boolean = {
if (cls.getTypeParameters.length != typeParameters.length) false
else if (cls.hasTypeParameters) {
val typeParamsAreAppliedInOrderToCorrectClass = aliasedType.getOrAny match {
case pte: ScParameterizedType =>
val refersToClass = pte.designator.equiv(ScalaType.designator(cls))
val typeParamsAppliedInOrder = (pte.typeArguments corresponds typeParameters) {
case (tpt: TypeParameterType, tp) if tpt.psiTypeParameter == tp => true
case _ => false
}
refersToClass && typeParamsAppliedInOrder
case _ => false
}
val varianceAndBoundsMatch = cls match {
case sc0@(_: ScClass | _: ScTrait) =>
val sc = sc0.asInstanceOf[ScTypeParametersOwner]
(typeParameters corresponds sc.typeParameters) {
case (tp1, tp2) => tp1.variance == tp2.variance && tp1.upperBound == tp2.upperBound && tp1.lowerBound == tp2.lowerBound &&
tp1.contextBound.isEmpty && tp2.contextBound.isEmpty && tp1.viewBound.isEmpty && tp2.viewBound.isEmpty
}
case _ => // Java class
(typeParameters corresponds cls.getTypeParameters) {
case (tp1, tp2) => tp1.variance == ScTypeParam.Invariant && tp1.upperTypeElement.isEmpty && tp2.getExtendsListTypes.isEmpty &&
tp1.lowerTypeElement.isEmpty && tp1.contextBound.isEmpty && tp1.viewBound.isEmpty
}
}
typeParamsAreAppliedInOrderToCorrectClass && varianceAndBoundsMatch
}
else {
val clsType = ScalaType.designator(cls)
typeParameters.isEmpty && aliasedType.getOrElse(return false).equiv(clsType)
}
}
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/api/statements/ScTypeAliasDefinition.scala | Scala | apache-2.0 | 3,534 |
package com.malaska.spark.training.windowing.superbig
import org.apache.log4j.{Level, Logger}
import org.apache.spark.Partitioner
import org.apache.spark.sql.SparkSession
object SuperBigWindowing {
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
def main(args: Array[String]): Unit = {
val jsonPath = args(0)
val pageSize = args(1).toInt
val spark = SparkSession.builder
.master("local")
.appName("my-spark-app")
.config("spark.some.config.option", "config-value")
.config("spark.driver.host","127.0.0.1")
.getOrCreate()
val jsonDf = spark.read.json(jsonPath)
import spark.implicits._
val diffDs = jsonDf.flatMap(row => {
val group = row.getAs[String]("group")
val time = row.getAs[Long]("time")
val value = row.getAs[Long]("value")
val timePage = time / pageSize
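      // Added note (not in the original source): a record whose time falls exactly on a page
      // boundary is emitted twice below, under its own page and under the next one, so that
      // adjacent pages share the boundary record when diffs are computed per page.
      // For example, with pageSize = 5 a record at time 10 is grouped under pages 2 and 3.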
if (time % pageSize == 0) { //Am I on the edge of the page
Seq((timePage, (time, value)), (timePage + 1, (time, value)))
} else {
Seq((timePage, (time, value)))
}
}).groupByKey(r => r._1).flatMapGroups((k, it) => {
var lastValue = 0L
it.toSeq.
sortBy{case (page, (time, value)) => time}.
map{case (page, (time, value)) =>
val dif = value - lastValue
lastValue = value
(time, value, dif)
}
})
diffDs.collect().foreach(r => println(" - " + r))
spark.stop()
}
}
| TedBear42/spark_training | src/main/scala/com/malaska/spark/training/windowing/superbig/SuperBigWindowing.scala | Scala | apache-2.0 | 1,475 |
package teststate.run
import japgolly.microlibs.name_fn._
import teststate.core.CoreExports._
import teststate.core.CoreExports2._
import teststate.core._
import teststate.data._
import teststate.typeclass._
sealed abstract class PlanLike[F[_], R, O, S, E, This] { self: This =>
type Self[FF[_], RR, OO, SS, EE]
// This causes scalac to produce stupid, spurious errors on the F type.
// final type This = Self[F, R, O, S, E]
protected def plan: Plan[F, R, O, S, E]
protected def setPlan(plan: Plan[F, R, O, S, E]): This
def name : Option[Name] = plan.name
def actions : Actions[F, R, O, S, E] = plan.actions
def invariants: Invariants[O, S, E] = plan.invariants
implicit val executionModel: ExecutionModel[F] = plan.executionModel
// def trans[G[_]: ExecutionModel](t: F ~~> G): Self[G, R, O, S, E]
// def mapR[R2](f: R2 => R): Self[F, R2, O, S, E]
// def pmapR[R2](f: R2 => E Or R): Self[F, R2, O, S, E]
// def pmapO[OO](g: OO => E Or O): Self[F, R, OO, S, E]
// def mapS[SS](g: SS => S)(s: (SS, S) => SS): Self[F, R, O, SS, E]
// def mapE[EE](f: E => EE): Self[F, R, O, S, EE]
// def lift[F2[_], R2, O2, S2, E2](implicit t: Transformer[F, R, O, S, E, F2, R2, O2, S2, E2]): Self[F2, R2, O2, S2, E2]
final protected def modPlan(f: Plan[F, R, O, S, E] => Plan[F, R, O, S, E]): This =
setPlan(f(plan))
final def setActions(actions: Actions[F, R, O, S, E]): This =
modPlan(p => new Plan(p.name, actions, p.invariants))
final def setInvariants(invariants: Invariants[O, S, E]): This =
modPlan(p => new Plan(p.name, p.actions, invariants))
final def clearName: This =
modPlan(p => new Plan(None, p.actions, p.invariants))
final def named(name: Name): This =
modPlan(p => new Plan(Some(name), p.actions, p.invariants))
final def modActions(f: Actions[F, R, O, S, E] => Actions[F, R, O, S, E]): This =
setActions(f(actions))
final def modInvariants(f: Invariants[O, S, E] => Invariants[O, S, E]): This =
setInvariants(f(invariants))
def addInvariants(i: Invariants[O, S, E]): This =
modInvariants(_ & i)
// TODO asAction that uses this.name
final def asAction(name: NameFn[ROS[R, O, S]]): Actions[F, R, O, S, E] =
Action.liftInner(Action.SubTest(actions, invariants))(name)
}
// █████████████████████████████████████████████████████████████████████████████████████████████████████████████████████
object Plan {
def apply[F[_], R, O, S, E](a: Actions[F, R, O, S, E], i: Invariants[O, S, E])(implicit em: ExecutionModel[F]): Plan[F, R, O, S, E] =
new Plan(None, a, i)(em)
def empty[F[_], R, O, S, E](implicit em: ExecutionModel[F]): Plan[F, R, O, S, E] =
apply[F, R, O, S, E](Empty.instance, Empty.instance)
implicit def emptyInstance[F[_], R, O, S, E](implicit em: ExecutionModel[F]): Empty[Plan[F, R, O, S, E]] =
Empty(empty)
def action[F[_], R, O, S, E](a: Actions[F, R, O, S, E])(implicit em: ExecutionModel[F]): Plan[F, R, O, S, E] =
apply(a, Empty.instance[Invariants[O, S, E]])(em)
def invariants[F[_], R, O, S, E](i: Invariants[O, S, E])(implicit em: ExecutionModel[F]): Plan[F, R, O, S, E] =
apply[F, R, O, S, E](Empty.instance, i)(em)
implicit def planInstanceDisplay[F[_], R, O, S, E](implicit sa: Display[Actions[F, R, O, S, E]],
si: Display[Invariants[O, S, E]]): Display[Plan[F, R, O, S, E]] =
Display(p =>
s"""
|Invariants:
|${si.indent(p.invariants)}
|Actions:
|${sa.indent(p.actions)}
""".stripMargin.trim
)
}
final class Plan[F[_], R, O, S, E](override val name: Option[Name],
override val actions: Actions[F, R, O, S, E],
override val invariants: Invariants[O, S, E])
(implicit override val executionModel: ExecutionModel[F])
extends PlanLike[F, R, O, S, E, Plan[F, R, O, S, E]] {
override def toString: String =
s"Plan($name, $actions, $invariants)"
override type Self[FF[_], RR, OO, SS, EE] = Plan[FF, RR, OO, SS, EE]
override protected def plan = this
override protected def setPlan(p: Plan[F, R, O, S, E]) = p
def trans[G[_]: ExecutionModel](t: F ~~> G): Self[G, R, O, S, E] =
new Plan(name, actions trans t, invariants)
def mapR[R2](f: R2 => R): Self[F, R2, O, S, E] =
new Plan(name, actions mapR f, invariants)
def pmapR[R2](f: R2 => E Or R): Self[F, R2, O, S, E] =
new Plan(name, actions pmapR f, invariants)
def pmapO[OO](g: OO => E Or O): Self[F, R, OO, S, E] =
new Plan(name, actions pmapO g, invariants pmapO g)
def mapS[SS](g: SS => S)(s: (SS, S) => SS): Self[F, R, O, SS, E] =
new Plan(name, actions.mapS(g)(s), invariants.mapS(g))
def mapE[EE](f: E => EE): Self[F, R, O, S, EE] =
new Plan(name, actions mapE f, invariants mapE f)
def lift[F2[_], R2, O2, S2, E2](implicit t: Transformer[F, R, O, S, E, F2, R2, O2, S2, E2]): Self[F2, R2, O2, S2, E2] =
new Plan(name, t actions actions, t invariants invariants)(t.f2)
def withInitialState(s: S) =
PlanWithInitialState(this, s)
def stateless(implicit ev: Unit =:= S) =
withInitialState(())
def test(observer: Observer[R, O, E])(implicit a: ErrorHandler[E]) =
Test(this, observer, Retry.Policy.never)(a)
def testU(implicit ev: Observer[R, Unit, E] =:= Observer[R, O, E], a: ErrorHandler[E]) =
test(ev(Observer.unit))(a)
}
// █████████████████████████████████████████████████████████████████████████████████████████████████████████████████████
final case class PlanWithInitialState[F[_], R, O, S, E](override val plan: Plan[F, R, O, S, E], initialState: S)
extends PlanLike[F, R, O, S, E, PlanWithInitialState[F, R, O, S, E]] {
override type Self[FF[_], RR, OO, SS, EE] = PlanWithInitialState[FF, RR, OO, SS, EE]
override protected def setPlan(p: Plan[F, R, O, S, E]) = copy(plan = p)
def trans[G[_]: ExecutionModel](t: F ~~> G): Self[G, R, O, S, E] =
plan.trans(t).withInitialState(initialState)
def mapR[R2](f: R2 => R): Self[F, R2, O, S, E] =
plan.mapR(f).withInitialState(initialState)
def pmapR[R2](f: R2 => E Or R): Self[F, R2, O, S, E] =
plan.pmapR(f).withInitialState(initialState)
def pmapO[OO](g: OO => E Or O): Self[F, R, OO, S, E] =
plan.pmapO(g).withInitialState(initialState)
// def mapS[SS](g: SS => S)(s: (SS, S) => SS): Self[F, R, O, SS, E] =
// def mapS[SS](f: S => SS)(g: SS => S): Self[F, R, O, SS, E] =
// plan.mapS(g)((_, s) => f(s)).withInitialState(f(initialState))
def mapE[EE](f: E => EE): Self[F, R, O, S, EE] =
plan.mapE(f).withInitialState(initialState)
// def lift[F2[_], R2, O2, S2, E2](implicit t: Transformer[F, R, O, S, E, F2, R2, O2, S2, E2]): Self[F2, R2, O2, S2, E2] =
// plan.lift(t).withInitialState(initialState)
def test(observer: Observer[R, O, E])(implicit a: ErrorHandler[E]) =
TestWithInitialState(plan.test(observer)(a), initialState)
def testU(implicit ev: Observer[R, Unit, E] =:= Observer[R, O, E], a: ErrorHandler[E]) =
test(ev(Observer.unit))(a)
}
// █████████████████████████████████████████████████████████████████████████████████████████████████████████████████████
final case class Test[F[_], R, O, S, E](override val plan: Plan[F, R, O, S, E],
observer: Observer[R, O, E],
retryPolicy: Retry.Policy)
(implicit val attempt: ErrorHandler[E])
extends PlanLike[F, R, O, S, E, Test[F, R, O, S, E]] {
override type Self[FF[_], RR, OO, SS, EE] = Test[FF, RR, OO, SS, EE]
override def setPlan(p: Plan[F, R, O, S, E]): Self[F, R, O, S, E] =
copy(plan = p)
def withRetryPolicy(p: Retry.Policy): Test[F, R, O, S, E] =
copy(retryPolicy = p)
def trans[G[_]: ExecutionModel](t: F ~~> G): Self[G, R, O, S, E] =
copy(plan = plan trans t)
def mapR[R2](f: R2 => R): Self[F, R2, O, S, E] =
Test(plan mapR f, observer cmapR f, retryPolicy)
def pmapR[R2](f: R2 => E Or R): Self[F, R2, O, S, E] =
Test(plan pmapR f, observer pmapR f, retryPolicy)
// def pmapO[OO](g: OO => E Or O): Self[F, R, OO, S, E] =
// Test(plan pmapO g, observer pmapO g)
def mapS[SS](g: SS => S)(s: (SS, S) => SS): Self[F, R, O, SS, E] =
copy(plan = plan.mapS(g)(s))
def mapE[EE](f: E => EE): Self[F, R, O, S, EE] =
Test(plan mapE f, observer mapE f, retryPolicy)(attempt map f)
// def lift[F2[_], R2, O2, S2, E2](implicit t: Transformer[F, R, O, S, E, F2, R2, O2, S2, E2]): Self[F2, R2, O2, S2, E2] =
// Test(plan.lift(t), observer)(recover)
def withInitialState(s: S) =
TestWithInitialState(this, s)
def stateless(implicit ev: Unit =:= S) =
withInitialState(())
@deprecated("Use withInitialState(s).withRefByName(ref).run()", "2.2.0")
def run(initialState: S, ref: => R): F[Report[E]] =
withInitialState(initialState).withRefByName(ref).run()
def runU(initialState: S)(implicit ev: Unit =:= R): F[Report[E]] =
withInitialState(initialState).withoutRef.run()
}
// █████████████████████████████████████████████████████████████████████████████████████████████████████████████████████
final case class TestWithInitialState[F[_], R, O, S, E](test: Test[F, R, O, S, E], initialState: S)
extends PlanLike[F, R, O, S, E, TestWithInitialState[F, R, O, S, E]] {
override type Self[FF[_], RR, OO, SS, EE] = TestWithInitialState[FF, RR, OO, SS, EE]
override def plan = test.plan
override def setPlan(p: Plan[F, R, O, S, E]) = test.setPlan(p).withInitialState(initialState)
def recover = test.attempt
def observer = test.observer
def retryPolicy = test.retryPolicy
def withRetryPolicy(p: Retry.Policy): TestWithInitialState[F, R, O, S, E] =
copy(test = test.withRetryPolicy(p))
def trans[G[_]: ExecutionModel](t: F ~~> G): Self[G, R, O, S, E] =
test.trans(t).withInitialState(initialState)
def mapR[R2](f: R2 => R): Self[F, R2, O, S, E] =
test.mapR(f).withInitialState(initialState)
def pmapR[R2](f: R2 => E Or R): Self[F, R2, O, S, E] =
test.pmapR(f).withInitialState(initialState)
// def pmapO[OO](g: OO => E Or O): Self[F, R, OO, S, E] =
// test.pmapO(g).withInitialState(initialState)
// def mapS[SS](g: SS => S)(s: (SS, S) => SS): Self[F, R, O, SS, E] =
// test.mapS(g)((_, s) => f(s)).withInitialState(f(initialState))
def mapE[EE](f: E => EE): Self[F, R, O, S, EE] =
test.mapE(f).withInitialState(initialState)
// def lift[F2[_], R2, O2, S2, E2](implicit t: Transformer[F, R, O, S, E, F2, R2, O2, S2, E2]): Self[F2, R2, O2, S2, E2] =
// test.lift(t).withInitialState(initialState)
def planWithInitialState =
plan.withInitialState(initialState)
@deprecated("Use withRefByName(ref).run()", "2.2.0")
def run(ref: => R): F[Report[E]] =
withRefByName(ref).run()
def runU()(implicit ev: Unit =:= R): F[Report[E]] =
withoutRef.run()
def withRef(ref: R): RunnableTest[F, R, O, S, E] =
RunnableTest(test, initialState, () => () => ref)
/** ref is evaluated once per test run, and reused after that */
def withLazyRef(ref: => R): RunnableTest[F, R, O, S, E] =
RunnableTest(test, initialState, () => {
lazy val r: R = ref
val f: () => R = () => r
f
})
/** ref is evaluated each time it's used */
def withRefByName(ref: => R): RunnableTest[F, R, O, S, E] =
RunnableTest(test, initialState, () => () => ref)
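  // Added note (not in the original source): given a hypothetical `newDriver()` factory,
  // `withLazyRef(newDriver())` evaluates it at most once per run and reuses the result,
  // whereas `withRefByName(newDriver())` re-evaluates it on every use of the ref.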
def withoutRef(implicit ev: Unit =:= R): RunnableTest[F, R, O, S, E] =
RunnableTest(test, initialState, () => () => ())
}
// █████████████████████████████████████████████████████████████████████████████████████████████████████████████████████
final case class RunnableTest[F[_], R, O, S, E](test: Test[F, R, O, S, E], initialState: S, refFnFn: () => () => R) {
def plan = test.plan
def recover = test.attempt
def observer = test.observer
def retryPolicy = test.retryPolicy
def withRetryPolicy(p: Retry.Policy): RunnableTest[F, R, O, S, E] =
copy(test = test.withRetryPolicy(p))
def trans[G[_]: ExecutionModel](t: F ~~> G): RunnableTest[G, R, O, S, E] =
copy(test = test.trans(t))
def run(): F[Report[E]] =
Runner.run(test)(initialState, refFnFn())
}
| japgolly/test-state | core/shared/src/main/scala/teststate/run/Test.scala | Scala | apache-2.0 | 13,477 |
package com.themillhousegroup.arallon.traits
import com.themillhousegroup.arallon._
trait Serializing[TZ <: TimeZone] {
this: TimeInZone[TZ] =>
def serialize: String = TimeInZoneSerializing.serialize(this)
}
object TimeInZoneSerializing {
// There seems to be no way to express this just once: :-(
private val serializationFormat = """%dZ:%s"""
private val deserializationRegex = """^(\d*)Z:(.*)$""".r
def serialize[TZ <: TimeZone](t: TimeInZone[TZ]): String = {
val r = String.format(serializationFormat, t.utcMillis.asInstanceOf[Object], t.timezone.name)
// Ensure we can read our own output
if (deserialize(r) != Some(t)) {
throw new RuntimeException(s"Couldn't read our own serialized output: $r")
}
r
}
def deserialize(s: String): Option[TimeInZone[TimeZone]] = {
deserializationRegex.findFirstMatchIn(s).map { m =>
val utcMillis = m.group(1).toLong
val zoneString = m.group(2)
TimeInZone.fromMillis(utcMillis, zoneString)
}
}
}
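// Editorial sketch (not part of the original file): the wire format above is
// "<utcMillis>Z:<zoneName>", for example (values assumed purely for illustration)
// "1431231600000Z:Europe/London". Because serialize verifies its own output, any string it
// returns satisfies the round trip
//   TimeInZoneSerializing.deserialize(TimeInZoneSerializing.serialize(t)) == Some(t)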
| themillhousegroup/arallon | src/main/scala/com/themillhousegroup/arallon/traits/Serializing.scala | Scala | mit | 1,010 |
package auctionHouse
import akka.actor.ActorRef
sealed trait AuctionMessage
case class Start(var bidTimer: BidTimer) extends AuctionMessage
case class BidTimerExpired() extends AuctionMessage
case class DeleteTimerExpired() extends AuctionMessage
case class Bid(var bidAmount: Integer) extends AuctionMessage
case class Relist() extends AuctionMessage
sealed trait AuctionStatus
case class AuctionEnded() extends AuctionStatus
case class AuctionWon() extends AuctionStatus
sealed trait BuyerMessage
case class BidRandomAuction() extends BuyerMessage
sealed trait SellerMessage
case class ItemSold() extends SellerMessage
case class ItemNotSold() extends SellerMessage
sealed trait AuctionSearchMessage
case class CreateAuction(item: String) extends AuctionSearchMessage
case class SearchForKeyword(keyword: String) extends AuctionSearchMessage
case class MatchingAuctions(auctions: Vector[ActorRef]) extends AuctionSearchMessage | s1mplex/AuctionHouse | src/main/scala/auctionHouse/Messages.scala | Scala | mit | 919 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection
import scala.reflect._
/**
* A fast hash map implementation for primitive, non-null keys. This hash map supports
* insertions and updates, but not deletions. This map is about an order of magnitude
* faster than java.util.HashMap, while using much less space overhead.
*
* Under the hood, it uses our OpenHashSet implementation.
*/
private[spark]
class PrimitiveKeyOpenHashMap[@specialized(Long, Int) K: ClassTag,
@specialized(Long, Int, Double) V: ClassTag](
initialCapacity: Int)
extends Iterable[(K, V)]
with Serializable {
def this() = this(64)
require(classTag[K] == classTag[Long] || classTag[K] == classTag[Int])
// Init in constructor (instead of in declaration) to work around a Scala compiler specialization
// bug that would generate two arrays (one for Object and one for specialized T).
protected var _keySet: OpenHashSet[K] = _
private var _values: Array[V] = _
_keySet = new OpenHashSet[K](initialCapacity)
_values = new Array[V](_keySet.capacity)
private var _oldValues: Array[V] = null
override def size: Int = _keySet.size
/** Tests whether this map contains a binding for a key. */
def contains(k: K): Boolean = {
_keySet.getPos(k) != OpenHashSet.INVALID_POS
}
/** Get the value for a given key */
def apply(k: K): V = {
val pos = _keySet.getPos(k)
_values(pos)
}
/** Get the value for a given key, or returns elseValue if it doesn't exist. */
def getOrElse(k: K, elseValue: V): V = {
val pos = _keySet.getPos(k)
if (pos >= 0) _values(pos) else elseValue
}
/** Set the value for a key */
def update(k: K, v: V): Unit = {
val pos = _keySet.addWithoutResize(k) & OpenHashSet.POSITION_MASK
_values(pos) = v
_keySet.rehashIfNeeded(k, grow, move)
_oldValues = null
}
/**
* If the key doesn't exist yet in the hash map, set its value to defaultValue; otherwise,
* set its value to mergeValue(oldValue).
*
* @return the newly updated value.
*/
def changeValue(k: K, defaultValue: => V, mergeValue: (V) => V): V = {
val pos = _keySet.addWithoutResize(k)
if ((pos & OpenHashSet.NONEXISTENCE_MASK) != 0) {
val newValue = defaultValue
_values(pos & OpenHashSet.POSITION_MASK) = newValue
_keySet.rehashIfNeeded(k, grow, move)
newValue
} else {
_values(pos) = mergeValue(_values(pos))
_values(pos)
}
}
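  // Added note (not in the original source): changeValue is an insert-or-merge upsert, e.g.
  // `map.changeValue(key, 1L, _ + 1L)` stores 1L the first time a key is seen and increments
  // the stored count on each later call.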
override def iterator: Iterator[(K, V)] = new Iterator[(K, V)] {
var pos = 0
var nextPair: (K, V) = computeNextPair()
/** Get the next value we should return from next(), or null if we're finished iterating */
def computeNextPair(): (K, V) = {
pos = _keySet.nextPos(pos)
if (pos >= 0) {
val ret = (_keySet.getValue(pos), _values(pos))
pos += 1
ret
} else {
null
}
}
def hasNext: Boolean = nextPair != null
def next(): (K, V) = {
val pair = nextPair
nextPair = computeNextPair()
pair
}
}
private def grow(newCapacity: Int): Unit = {
_oldValues = _values
_values = new Array[V](newCapacity)
}
private def move(oldPos: Int, newPos: Int): Unit = {
_values(newPos) = _oldValues(oldPos)
}
}
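// Editorial usage sketch (not part of the original Spark source). The object name and the
// sample keys/values below are invented purely to illustrate the API defined above.
private[spark] object PrimitiveKeyOpenHashMapUsageSketch {
  def demo(): (Long, Seq[(Int, Long)]) = {
    val map = new PrimitiveKeyOpenHashMap[Int, Long]()
    map.update(1, 10L)                  // insert a binding
    map.update(1, 11L)                  // overwrite the existing binding for key 1
    map.changeValue(2, 1L, _ + 1L)      // insert-or-merge: stores 1L since key 2 is new
    val missing = map.getOrElse(3, -1L) // -1L: key 3 was never inserted
    (missing, map.iterator.toSeq)       // the remaining bindings: 1 -> 11L and 2 -> 1L
  }
}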
| mahak/spark | core/src/main/scala/org/apache/spark/util/collection/PrimitiveKeyOpenHashMap.scala | Scala | apache-2.0 | 4,081 |
package com.rasterfoundry.api.platform
import com.rasterfoundry.akkautil.PaginationDirectives
import com.rasterfoundry.akkautil.{
Authentication,
CommonHandlers,
UserErrorHandler
}
import com.rasterfoundry.api.utils.queryparams.QueryParametersCommon
import com.rasterfoundry.database.filter.Filterables._
import com.rasterfoundry.database.{
OrganizationDao,
PlatformDao,
TeamDao,
UserDao
}
import com.rasterfoundry.datamodel._
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server._
import cats.effect.IO
import cats.implicits._
import de.heikoseeberger.akkahttpcirce.ErrorAccumulatingCirceSupport._
import doobie._
import doobie.implicits._
import doobie.postgres.implicits._
import java.util.UUID
trait PlatformRoutes
extends Authentication
with PaginationDirectives
with CommonHandlers
with UserErrorHandler
with QueryParametersCommon {
val xa: Transactor[IO]
def validateOrganization(platformId: UUID, orgId: UUID): Directive0 =
authorizeAsync {
OrganizationDao
.validatePath(platformId, orgId)
.transact(xa)
.unsafeToFuture
}
def validateOrganizationAndTeam(platformId: UUID, orgId: UUID, teamId: UUID) =
authorizeAsync {
TeamDao
.validatePath(platformId, orgId, teamId)
.transact(xa)
.unsafeToFuture
}
val platformRoutes: Route = handleExceptions(userExceptionHandler) {
pathEndOrSingleSlash {
get {
listPlatforms
} ~
post {
createPlatform
}
} ~
pathPrefix(JavaUUID) { platformId =>
pathEndOrSingleSlash {
get {
getPlatform(platformId)
} ~
put {
updatePlatform(platformId)
} ~
post {
setPlatformStatus(platformId)
}
} ~
pathPrefix("members") {
pathEndOrSingleSlash {
get {
listPlatformMembers(platformId)
}
}
} ~
pathPrefix("teams") {
pathPrefix("search") {
pathEndOrSingleSlash {
get {
listPlatformUserTeams(platformId)
}
}
}
} ~
pathPrefix("organizations") {
pathEndOrSingleSlash {
get {
listPlatformOrganizations(platformId)
} ~
post {
createOrganization(platformId)
}
} ~
pathPrefix(JavaUUID) { orgId =>
pathEndOrSingleSlash {
get {
getOrganization(platformId, orgId)
} ~
put {
updateOrganization(platformId, orgId)
} ~
post {
setOrganizationStatus(platformId, orgId)
}
} ~
pathPrefix("members") {
pathEndOrSingleSlash {
{
get {
listOrganizationMembers(orgId)
} ~
post {
addUserToOrganization(platformId, orgId)
}
}
} ~
pathPrefix(Segment) { userId =>
delete {
removeUserFromOrganization(orgId, userId)
}
}
} ~
pathPrefix("teams") {
pathEndOrSingleSlash {
get {
listTeams(platformId, orgId)
} ~
post {
createTeam(platformId, orgId)
}
} ~
pathPrefix(JavaUUID) { teamId =>
pathEndOrSingleSlash {
get {
getTeam(platformId, orgId, teamId)
} ~
put {
updateTeam(platformId, orgId, teamId)
} ~
delete {
deleteTeam(platformId, orgId, teamId)
}
} ~
pathPrefix("members") {
pathEndOrSingleSlash {
get {
listTeamMembers(platformId, orgId, teamId)
} ~
post {
addUserToTeam(platformId, orgId, teamId)
}
} ~
pathPrefix(Segment) { userId =>
delete {
removeUserFromTeam(
platformId,
orgId,
teamId,
userId
)
}
}
}
}
}
}
}
}
}
// @TODO: most platform API interactions should be highly restricted -- only 'super-users' should
// be able to do list, create, update, delete. Non-super users can only get a platform if they belong to it.
def listPlatforms: Route = authenticate { user =>
authorizeScope(ScopedAction(Domain.Platforms, Action.Read, None), user) {
authorizeAsync {
UserDao.isSuperUser(user).transact(xa).unsafeToFuture
} {
withPagination { page =>
complete {
PlatformDao.listPlatforms(page).transact(xa).unsafeToFuture
}
}
}
}
}
def createPlatform: Route = authenticate { user =>
authorizeScope(ScopedAction(Domain.Platforms, Action.Create, None), user) {
authorizeAsync {
UserDao.isSuperUser(user).transact(xa).unsafeToFuture
} {
entity(as[Platform]) { platform =>
completeOrFail {
PlatformDao.create(platform).transact(xa).unsafeToFuture
}
}
}
}
}
def getPlatform(platformId: UUID): Route = authenticate { user =>
authorizeScope(ScopedAction(Domain.Platforms, Action.Read, None), user) {
authorizeAsync {
PlatformDao.userIsMember(user, platformId).transact(xa).unsafeToFuture
} {
rejectEmptyResponse {
complete {
PlatformDao.getPlatformById(platformId).transact(xa).unsafeToFuture
}
}
}
}
}
def updatePlatform(platformId: UUID): Route = authenticate { user =>
authorizeScope(ScopedAction(Domain.Platforms, Action.Update, None), user) {
authorizeAsync {
PlatformDao.userIsAdmin(user, platformId).transact(xa).unsafeToFuture
} {
entity(as[Platform]) { platformToUpdate =>
completeWithOneOrFail {
PlatformDao
.update(platformToUpdate, platformId)
.transact(xa)
.unsafeToFuture
}
}
}
}
}
def listPlatformMembers(platformId: UUID): Route = authenticate { user =>
authorizeScope(
ScopedAction(Domain.Platforms, Action.ListUsers, None),
user
) {
authorizeAsync {
PlatformDao.userIsAdmin(user, platformId).transact(xa).unsafeToFuture
} {
(withPagination & searchParams) { (page, searchParams) =>
complete {
PlatformDao
.listMembers(platformId, page, searchParams, user)
.transact(xa)
.unsafeToFuture
}
}
}
}
}
// List teams:
// - the operating user belongs to
// - the operating user can see due to organization membership
// - limited to first 5 ordered by team name
// - filtered by `search=<team name>` if specified
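  // Illustrative request shape (mount point and ids hypothetical):
  //   GET .../platforms/<platformId>/teams/search?search=alpha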
def listPlatformUserTeams(platformId: UUID): Route = authenticate { user =>
authorizeScope(ScopedAction(Domain.Teams, Action.Search, None), user) {
authorizeAsync {
PlatformDao.userIsMember(user, platformId).transact(xa).unsafeToFuture
} {
(searchParams) { (searchParams) =>
complete {
PlatformDao
.listPlatformUserTeams(user, searchParams)
.transact(xa)
.unsafeToFuture
}
}
}
}
}
def listPlatformOrganizations(platformId: UUID): Route = authenticate {
user =>
authorizeScope(
ScopedAction(Domain.Organizations, Action.Read, None),
user
) {
authorizeAsync {
PlatformDao.userIsAdmin(user, platformId).transact(xa).unsafeToFuture
} {
(withPagination & searchParams) { (page, search) =>
complete {
OrganizationDao
.listPlatformOrganizations(page, search, platformId, user)
.transact(xa)
.unsafeToFuture
}
}
}
}
}
def createOrganization(platformId: UUID): Route = authenticate { user =>
authorizeScope(
ScopedAction(Domain.Organizations, Action.Create, None),
user
) {
entity(as[Organization.Create]) { orgToCreate =>
completeOrFail {
val createdOrg = for {
isAdmin <- PlatformDao.userIsAdmin(user, platformId)
org <- OrganizationDao.create(orgToCreate.toOrganization(isAdmin))
} yield org
createdOrg.transact(xa).unsafeToFuture
}
}
}
}
def getOrganization(platformId: UUID, orgId: UUID): Route = authenticate {
user =>
authorizeScope(
ScopedAction(Domain.Organizations, Action.Read, None),
user
) {
authorizeAsync {
PlatformDao.userIsMember(user, platformId).transact(xa).unsafeToFuture
} {
validateOrganization(platformId, orgId) {
rejectEmptyResponse {
complete {
OrganizationDao.query
.filter(orgId)
.filter(fr"platform_id = ${platformId}")
.selectOption
.transact(xa)
.unsafeToFuture
}
}
}
}
}
}
def updateOrganization(platformId: UUID, orgId: UUID): Route = authenticate {
user =>
authorizeScope(
ScopedAction(Domain.Organizations, Action.Update, None),
user
) {
authorizeAsync {
OrganizationDao.userIsAdmin(user, orgId).transact(xa).unsafeToFuture
} {
validateOrganization(platformId, orgId) {
entity(as[Organization]) { orgToUpdate =>
completeWithOneOrFail {
OrganizationDao
.update(orgToUpdate, orgId)
.transact(xa)
.unsafeToFuture
}
}
}
}
}
}
def listOrganizationMembers(orgId: UUID): Route =
authenticate { user =>
authorizeScope(
ScopedAction(Domain.Organizations, Action.ListUsers, None),
user
) {
authorizeAsync {
// QUESTION: should this be open to members of the same platform?
OrganizationDao.userIsMember(user, orgId).transact(xa).unsafeToFuture
} {
(withPagination & searchParams) { (page, searchParams) =>
complete {
OrganizationDao
.listMembers(orgId, page, searchParams, user)
.transact(xa)
.unsafeToFuture
}
}
}
}
}
def addUserToOrganization(platformId: UUID, orgId: UUID): Route =
authenticate { user =>
authorizeScope(
ScopedAction(Domain.Organizations, Action.AddUser, None),
user
) {
entity(as[UserGroupRole.UserRole]) { ur =>
authorizeAsync {
val authCheck = (
OrganizationDao.userIsAdmin(user, orgId),
PlatformDao.userIsMember(user, platformId),
(user.id == ur.userId).pure[ConnectionIO]
).tupled.map(
{
case (true, _, _) | (_, true, true) => true
case _ => false
}
)
authCheck.transact(xa).unsafeToFuture
} {
complete {
OrganizationDao
.addUserRole(platformId, user, ur.userId, orgId, ur.groupRole)
.transact(xa)
.unsafeToFuture
}
}
}
}
}
def removeUserFromOrganization(orgId: UUID, userId: String): Route =
authenticate { user =>
authorizeScope(
ScopedAction(Domain.Organizations, Action.RemoveUser, None),
user
) {
authorizeAsync {
val authCheck = (
OrganizationDao.userIsAdmin(user, orgId),
(userId == user.id).pure[ConnectionIO]
).tupled.map(
{
case (true, _) | (_, true) => true
case _ => false
}
)
authCheck.transact(xa).unsafeToFuture
} {
complete {
OrganizationDao
.deactivateUserRoles(userId, orgId)
.transact(xa)
.unsafeToFuture
}
}
}
}
def listTeams(platformId: UUID, organizationId: UUID): Route = authenticate {
user =>
authorizeScope(ScopedAction(Domain.Teams, Action.Read, None), user) {
authorizeAsync {
OrganizationDao
.userIsMember(user, organizationId)
.transact(xa)
.unsafeToFuture
} {
validateOrganization(platformId, organizationId) {
(withPagination & teamQueryParameters) { (page, teamQueryParams) =>
complete {
TeamDao
.listOrgTeams(organizationId, page, teamQueryParams)
.transact(xa)
.unsafeToFuture
}
}
}
}
}
}
def createTeam(platformId: UUID, orgId: UUID): Route = authenticate { user =>
authorizeScope(ScopedAction(Domain.Teams, Action.Create, None), user) {
authorizeAsync {
OrganizationDao.userIsMember(user, orgId).transact(xa).unsafeToFuture
} {
validateOrganization(platformId, orgId) {
entity(as[Team.Create]) { newTeamCreate =>
onSuccess(
OrganizationDao
.userIsAdmin(user, orgId)
.flatMap {
case true => TeamDao.create(newTeamCreate.toTeam(user))
case _ =>
TeamDao.createWithRole(
newTeamCreate.toTeam(user),
user
)
}
.transact(xa)
.unsafeToFuture()
) { team =>
complete(StatusCodes.Created, team)
}
}
}
}
}
}
def getTeam(platformId: UUID, orgId: UUID, teamId: UUID): Route =
authenticate { user =>
authorizeScope(ScopedAction(Domain.Teams, Action.Read, None), user) {
authorizeAsync {
OrganizationDao.userIsMember(user, orgId).transact(xa).unsafeToFuture
} {
validateOrganizationAndTeam(platformId, orgId, teamId) {
rejectEmptyResponse {
complete {
TeamDao.query
.filter(teamId)
.selectOption
.transact(xa)
.unsafeToFuture
}
}
}
}
}
}
def updateTeam(platformId: UUID, orgId: UUID, teamId: UUID): Route =
authenticate { user =>
authorizeScope(ScopedAction(Domain.Teams, Action.Update, None), user) {
authorizeAsync {
TeamDao.userIsAdmin(user, teamId).transact(xa).unsafeToFuture
} {
validateOrganizationAndTeam(platformId, orgId, teamId) {
entity(as[Team]) { updatedTeam =>
onSuccess {
TeamDao
.update(updatedTeam, teamId)
.transact(xa)
.unsafeToFuture
} { team =>
complete(StatusCodes.OK, team)
}
}
}
}
}
}
def deleteTeam(platformId: UUID, orgId: UUID, teamId: UUID): Route =
authenticate { user =>
authorizeScope(ScopedAction(Domain.Teams, Action.Delete, None), user) {
authorizeAsync {
TeamDao.userIsAdmin(user, teamId).transact(xa).unsafeToFuture
} {
validateOrganizationAndTeam(platformId, orgId, teamId) {
completeWithOneOrFail {
TeamDao.deactivate(teamId).transact(xa).unsafeToFuture
}
}
}
}
}
def listTeamMembers(platformId: UUID, orgId: UUID, teamId: UUID): Route =
authenticate { user =>
authorizeScope(ScopedAction(Domain.Teams, Action.ListUsers, None), user) {
authorizeAsync {
// The authorization here is necessary to allow users within cross-organizational teams to view
// the members within those teams
val decisionIO = for {
isTeamMember <- TeamDao.userIsMember(user, teamId)
isOrganizationMember <- OrganizationDao.userIsMember(user, orgId)
} yield {
isTeamMember || isOrganizationMember
}
decisionIO.transact(xa).unsafeToFuture
} {
validateOrganizationAndTeam(platformId, orgId, teamId) {
(withPagination & searchParams) { (page, searchParams) =>
complete {
TeamDao
.listMembers(teamId, page, searchParams, user)
.transact(xa)
.unsafeToFuture
}
}
}
}
}
}
def addUserToTeam(platformId: UUID, orgId: UUID, teamId: UUID): Route =
authenticate { user =>
authorizeScope(ScopedAction(Domain.Teams, Action.AddUser, None), user) {
entity(as[UserGroupRole.UserRole]) { ur =>
authorizeAsync {
val authCheck = (
TeamDao.userIsAdmin(user, teamId),
PlatformDao.userIsMember(user, platformId),
(user.id == ur.userId).pure[ConnectionIO]
).tupled.map(
{
case (true, _, _) | (_, true, true) => true
case _ => false
}
)
authCheck.transact(xa).unsafeToFuture
} {
validateOrganizationAndTeam(platformId, orgId, teamId) {
complete {
TeamDao
.addUserRole(
platformId,
user,
ur.userId,
teamId,
ur.groupRole
)
.transact(xa)
.unsafeToFuture
}
}
}
}
}
}
def removeUserFromTeam(
platformId: UUID,
orgId: UUID,
teamId: UUID,
userId: String
): Route = authenticate { user =>
authorizeScope(ScopedAction(Domain.Teams, Action.RemoveUser, None), user) {
authorizeAsync {
val authCheck = (
TeamDao.userIsAdmin(user, teamId),
(userId == user.id).pure[ConnectionIO]
).tupled.map(
{
case (true, _) | (_, true) => true
case _ => false
}
)
authCheck.transact(xa).unsafeToFuture
} {
validateOrganizationAndTeam(platformId, orgId, teamId) {
complete {
TeamDao
.deactivateUserRoles(userId, teamId)
.transact(xa)
.unsafeToFuture
}
}
}
}
}
def setPlatformStatus(platformId: UUID): Route = authenticate { user =>
authorizeScope(ScopedAction(Domain.Platforms, Action.Update, None), user) {
authorizeAsync {
PlatformDao.userIsAdmin(user, platformId).transact(xa).unsafeToFuture
} {
entity(as[ActiveStatus]) {
case ActiveStatus(true) =>
activatePlatform(platformId, user)
case ActiveStatus(false) =>
deactivatePlatform(platformId)
}
}
}
}
def activatePlatform(platformId: UUID, user: User): Route =
authorizeScope(ScopedAction(Domain.Platforms, Action.Update, None), user) {
authorizeAsync {
UserDao.isSuperUser(user).transact(xa).unsafeToFuture
} {
complete {
PlatformDao.activatePlatform(platformId).transact(xa).unsafeToFuture
}
}
}
def deactivatePlatform(platformId: UUID): Route = complete {
PlatformDao.deactivatePlatform(platformId).transact(xa).unsafeToFuture
}
def setOrganizationStatus(platformId: UUID, organizationId: UUID): Route =
authenticate { user =>
authorizeScope(
ScopedAction(Domain.Organizations, Action.Update, None),
user
) {
authorizeAsync {
OrganizationDao
.userIsAdmin(user, organizationId)
.transact(xa)
.unsafeToFuture
} {
validateOrganization(platformId, organizationId) {
entity(as[String]) {
case status: String if status == OrgStatus.Active.toString =>
activateOrganization(platformId, organizationId, user)
case status: String if status == OrgStatus.Inactive.toString =>
deactivateOrganization(organizationId, user)
}
}
}
}
}
def activateOrganization(
platformId: UUID,
organizationId: UUID,
user: User
): Route =
authorizeScope(
ScopedAction(Domain.Organizations, Action.Update, None),
user
) {
authorizeAsync {
PlatformDao.userIsAdmin(user, platformId).transact(xa).unsafeToFuture
} {
complete {
OrganizationDao
.activateOrganization(organizationId)
.transact(xa)
.unsafeToFuture
}
}
}
def deactivateOrganization(organizationId: UUID, user: User): Route =
authorizeScope(
ScopedAction(Domain.Organizations, Action.Update, None),
user
) {
authorizeAsync {
OrganizationDao
.userIsAdmin(user, organizationId)
.transact(xa)
.unsafeToFuture
} {
complete {
OrganizationDao
.deactivateOrganization(organizationId)
.transact(xa)
.unsafeToFuture
}
}
}
}
| raster-foundry/raster-foundry | app-backend/api/src/main/scala/platform/Routes.scala | Scala | apache-2.0 | 23,107 |
package scintuit.data.raw
import com.github.nscala_money.money.Imports._
import com.github.nscala_time.time.Imports._
/**
* Module for security info resources
*/
object security {
sealed trait SecurityInfo {
val assetClass: Option[String]
val fiAssetClass: Option[String]
val ticker: Option[String]
val uniqueId: Option[String]
val uniqueIdType: Option[String]
val asOfDate: Option[DateTime]
val rating: Option[String]
val fiId: Option[String]
val name: Option[String]
val fundName: Option[String]
val memo: Option[String]
val symbolRefId: Option[String]
val currencyCode: Option[CurrencyUnit]
val currencyRate: Option[BigDecimal]
val unitPrice: Option[BigDecimal]
}
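  // Note: each concrete variant below also adds a `percent: Option[BigDecimal]` field
  // that is not declared on this base trait.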
case class DebtSecurityInfo(
assetClass: Option[String],
fiAssetClass: Option[String],
ticker: Option[String],
uniqueId: Option[String],
uniqueIdType: Option[String],
asOfDate: Option[DateTime],
rating: Option[String],
percent: Option[BigDecimal],
fiId: Option[String],
name: Option[String],
fundName: Option[String],
memo: Option[String],
symbolRefId: Option[String],
currencyCode: Option[CurrencyUnit],
currencyRate: Option[BigDecimal],
unitPrice: Option[BigDecimal],
callDate: Option[DateTime],
yieldToCall: Option[BigDecimal],
callPrice: Option[BigDecimal],
callType: Option[String],
couponFreq: Option[String],
couponMaturityDate: Option[DateTime],
couponRate: Option[BigDecimal],
debtClass: Option[String],
debtType: Option[String],
maturityDate: Option[DateTime],
yieldToMaturity: Option[BigDecimal],
parValue: Option[BigDecimal]
) extends SecurityInfo
case class MutualFundSecurityInfo(
assetClass: Option[String],
fiAssetClass: Option[String],
ticker: Option[String],
uniqueId: Option[String],
uniqueIdType: Option[String],
asOfDate: Option[DateTime],
rating: Option[String],
percent: Option[BigDecimal],
fiId: Option[String],
name: Option[String],
fundName: Option[String],
memo: Option[String],
symbolRefId: Option[String],
currencyCode: Option[CurrencyUnit],
currencyRate: Option[BigDecimal],
unitPrice: Option[BigDecimal],
mfType: Option[String],
fundManager: Option[String],
`yield`: Option[BigDecimal],
yieldAsOfDate: Option[DateTime]
) extends SecurityInfo
case class OptionSecurityInfo(
assetClass: Option[String],
fiAssetClass: Option[String],
ticker: Option[String],
uniqueId: Option[String],
uniqueIdType: Option[String],
asOfDate: Option[DateTime],
rating: Option[String],
percent: Option[BigDecimal],
fiId: Option[String],
name: Option[String],
fundName: Option[String],
memo: Option[String],
symbolRefId: Option[String],
currencyCode: Option[CurrencyUnit],
currencyRate: Option[BigDecimal],
unitPrice: Option[BigDecimal],
expireDate: Option[DateTime],
strikePrice: Option[BigDecimal],
optType: Option[String],
securityUniqueId: Option[String],
securityUniqueIdType: Option[String],
sharesPerContract: Option[Long]
) extends SecurityInfo
case class StockSecurityInfo(
assetClass: Option[String],
fiAssetClass: Option[String],
ticker: Option[String],
uniqueId: Option[String],
uniqueIdType: Option[String],
asOfDate: Option[DateTime],
rating: Option[String],
percent: Option[BigDecimal],
fiId: Option[String],
name: Option[String],
fundName: Option[String],
memo: Option[String],
symbolRefId: Option[String],
currencyCode: Option[CurrencyUnit],
currencyRate: Option[BigDecimal],
unitPrice: Option[BigDecimal],
stockType: Option[String],
`yield`: Option[BigDecimal],
yieldAsOfDate: Option[DateTime]
) extends SecurityInfo
case class OtherSecurityInfo(
assetClass: Option[String],
fiAssetClass: Option[String],
ticker: Option[String],
uniqueId: Option[String],
uniqueIdType: Option[String],
asOfDate: Option[DateTime],
rating: Option[String],
percent: Option[BigDecimal],
fiId: Option[String],
name: Option[String],
fundName: Option[String],
memo: Option[String],
symbolRefId: Option[String],
currencyCode: Option[CurrencyUnit],
currencyRate: Option[BigDecimal],
unitPrice: Option[BigDecimal],
typeDesc: Option[String]
) extends SecurityInfo
}
| drbild/scintuit | data/src/main/scala/scintuit/data/raw/security.scala | Scala | apache-2.0 | 4,449 |
package slick.ast
import TypeUtil.typeToTypeUtil
import Util._
import slick.util.ConstArray
/** A SQL comprehension */
final case class Comprehension(sym: TermSymbol, from: Node, select: Node, where: Option[Node] = None,
groupBy: Option[Node] = None, orderBy: ConstArray[(Node, Ordering)] = ConstArray.empty,
having: Option[Node] = None,
distinct: Option[Node] = None,
fetch: Option[Node] = None, offset: Option[Node] = None) extends DefNode {
type Self = Comprehension
lazy val children = (ConstArray.newBuilder() + from + select ++ where ++ groupBy ++ orderBy.map(_._1) ++ having ++ distinct ++ fetch ++ offset).result
override def childNames =
Seq("from "+sym, "select") ++
where.map(_ => "where") ++
groupBy.map(_ => "groupBy") ++
orderBy.map("orderBy " + _._2).toSeq ++
having.map(_ => "having") ++
distinct.map(_ => "distinct") ++
fetch.map(_ => "fetch") ++
offset.map(_ => "offset")
protected[this] def rebuild(ch: ConstArray[Node]) = {
val newFrom = ch(0)
val newSelect = ch(1)
val whereOffset = 2
val newWhere = ch.slice(whereOffset, whereOffset + where.productArity)
val groupByOffset = whereOffset + newWhere.length
val newGroupBy = ch.slice(groupByOffset, groupByOffset + groupBy.productArity)
val orderByOffset = groupByOffset + newGroupBy.length
val newOrderBy = ch.slice(orderByOffset, orderByOffset + orderBy.length)
val havingOffset = orderByOffset + newOrderBy.length
val newHaving = ch.slice(havingOffset, havingOffset + having.productArity)
val distinctOffset = havingOffset + newHaving.length
val newDistinct = ch.slice(distinctOffset, distinctOffset + distinct.productArity)
val fetchOffset = distinctOffset + newDistinct.length
val newFetch = ch.slice(fetchOffset, fetchOffset + fetch.productArity)
val offsetOffset = fetchOffset + newFetch.length
val newOffset = ch.slice(offsetOffset, offsetOffset + offset.productArity)
copy(
from = newFrom,
select = newSelect,
where = newWhere.headOption,
groupBy = newGroupBy.headOption,
orderBy = orderBy.zip(newOrderBy).map { case ((_, o), n) => (n, o) },
having = newHaving.headOption,
distinct = newDistinct.headOption,
fetch = newFetch.headOption,
offset = newOffset.headOption
)
}
def generators = ConstArray((sym, from))
protected[this] def rebuildWithSymbols(gen: ConstArray[TermSymbol]) = copy(sym = gen.head)
def withInferredType(scope: Type.Scope, typeChildren: Boolean): Self = {
// Assign type to "from" Node and compute the resulting scope
val f2 = from.infer(scope, typeChildren)
val genScope = scope + (sym -> f2.nodeType.asCollectionType.elementType)
// Assign types to "select", "where", "groupBy", "orderBy", "having", "distinct", "fetch" and "offset" Nodes
val s2 = select.infer(genScope, typeChildren)
val w2 = mapOrNone(where)(_.infer(genScope, typeChildren))
val g2 = mapOrNone(groupBy)(_.infer(genScope, typeChildren))
val o = orderBy.map(_._1)
val o2 = o.endoMap(_.infer(genScope, typeChildren))
val h2 = mapOrNone(having)(_.infer(genScope, typeChildren))
val distinct2 = mapOrNone(distinct)(_.infer(genScope, typeChildren))
val fetch2 = mapOrNone(fetch)(_.infer(genScope, typeChildren))
val offset2 = mapOrNone(offset)(_.infer(genScope, typeChildren))
// Check if the nodes changed
val same = (f2 eq from) && (s2 eq select) && w2.isEmpty && g2.isEmpty && (o2 eq o) && h2.isEmpty &&
distinct2.isEmpty && fetch2.isEmpty && offset2.isEmpty
val newType =
if(!hasType) CollectionType(f2.nodeType.asCollectionType.cons, s2.nodeType.asCollectionType.elementType)
else nodeType
if(same && newType == nodeType) this else {
copy(
from = f2,
select = s2,
where = w2.orElse(where),
groupBy = g2.orElse(groupBy),
orderBy = if(o2 eq o) orderBy else orderBy.zip(o2).map { case ((_, o), n) => (n, o) },
having = h2.orElse(having),
distinct = distinct2.orElse(distinct),
fetch = fetch2.orElse(fetch),
offset = offset2.orElse(offset)
) :@ newType
}
}
}
/** The row_number window function */
final case class RowNumber(by: ConstArray[(Node, Ordering)] = ConstArray.empty) extends SimplyTypedNode {
type Self = RowNumber
def buildType = ScalaBaseType.longType
lazy val children = by.map(_._1)
protected[this] def rebuild(ch: ConstArray[Node]) =
copy(by = by.zip(ch).map{ case ((_, o), n) => (n, o) })
override def childNames = by.zipWithIndex.map("by" + _._2).toSeq
override def getDumpInfo = super.getDumpInfo.copy(mainInfo = "")
}
| bmclane/slick | slick/src/main/scala/slick/ast/Comprehension.scala | Scala | bsd-2-clause | 4,794 |
/*
* scala-bcp-rpc
* Copyright 2014 深圳岂凡网络有限公司 (Shenzhen QiFun Network Corp., LTD)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.qifun.bcp.rpc
import net.sandrogrzicic.scalabuff.Message
import com.google.protobuf.GeneratedMessageLite
import scala.reflect.ClassTag
import scala.reflect.runtime.universe._
private[rpc] trait IResponseHandler {
def responseType: TypeTag[GeneratedMessageLite]
def onSuccess(message: GeneratedMessageLite): Unit
def onFailure(message: GeneratedMessageLite): Unit
} | qifun/scala-bcp-rpc | src/main/scala/com/qifun/bcp/rpc/IResponseHandler.scala | Scala | apache-2.0 | 1,067 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.nio.ByteBuffer
import kafka.api.ApiUtils._
import kafka.common.{ErrorMapping, OffsetAndMetadata, TopicAndPartition}
import kafka.network.{RequestOrResponseSend, RequestChannel}
import kafka.network.RequestChannel.Response
import kafka.utils.Logging
import org.apache.kafka.common.protocol.ApiKeys
import scala.collection._
object OffsetCommitRequest extends Logging {
val CurrentVersion: Short = 2
val DefaultClientId = ""
def readFrom(buffer: ByteBuffer): OffsetCommitRequest = {
// Read values from the envelope
val versionId = buffer.getShort
assert(versionId == 0 || versionId == 1 || versionId == 2,
"Version " + versionId + " is invalid for OffsetCommitRequest. Valid versions are 0, 1 or 2.")
val correlationId = buffer.getInt
val clientId = readShortString(buffer)
// Read the OffsetRequest
val groupId = readShortString(buffer)
// version 1 and 2 specific fields
val groupGenerationId: Int =
if (versionId >= 1)
buffer.getInt
else
org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_GENERATION_ID
val memberId: String =
if (versionId >= 1)
readShortString(buffer)
else
org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_MEMBER_ID
// version 2 specific fields
val retentionMs: Long =
if (versionId >= 2)
buffer.getLong
else
org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_RETENTION_TIME
val topicCount = buffer.getInt
val pairs = (1 to topicCount).flatMap(_ => {
val topic = readShortString(buffer)
val partitionCount = buffer.getInt
(1 to partitionCount).map(_ => {
val partitionId = buffer.getInt
val offset = buffer.getLong
val timestamp = {
// version 1 specific field
if (versionId == 1)
buffer.getLong
else
org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_TIMESTAMP
}
val metadata = readShortString(buffer)
(TopicAndPartition(topic, partitionId), OffsetAndMetadata(offset, metadata, timestamp))
})
})
OffsetCommitRequest(groupId, immutable.Map(pairs:_*), versionId, correlationId, clientId, groupGenerationId, memberId, retentionMs)
}
}
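// Version summary (as read/written below): v0 carries only the group plus per-partition
// offset/metadata; v1 adds groupGenerationId, memberId and a per-partition commit timestamp;
// v2 drops the per-partition timestamp and adds a request-level retentionMs.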
case class OffsetCommitRequest(groupId: String,
requestInfo: immutable.Map[TopicAndPartition, OffsetAndMetadata],
versionId: Short = OffsetCommitRequest.CurrentVersion,
correlationId: Int = 0,
clientId: String = OffsetCommitRequest.DefaultClientId,
groupGenerationId: Int = org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_GENERATION_ID,
memberId: String = org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_MEMBER_ID,
retentionMs: Long = org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_RETENTION_TIME)
extends RequestOrResponse(Some(ApiKeys.OFFSET_COMMIT.id)) {
assert(versionId == 0 || versionId == 1 || versionId == 2,
"Version " + versionId + " is invalid for OffsetCommitRequest. Valid versions are 0, 1 or 2.")
lazy val requestInfoGroupedByTopic = requestInfo.groupBy(_._1.topic)
def writeTo(buffer: ByteBuffer) {
// Write envelope
buffer.putShort(versionId)
buffer.putInt(correlationId)
writeShortString(buffer, clientId)
// Write OffsetCommitRequest
writeShortString(buffer, groupId) // consumer group
// version 1 and 2 specific data
if (versionId >= 1) {
buffer.putInt(groupGenerationId)
writeShortString(buffer, memberId)
}
// version 2 or above specific data
if (versionId >= 2) {
buffer.putLong(retentionMs)
}
buffer.putInt(requestInfoGroupedByTopic.size) // number of topics
    requestInfoGroupedByTopic.foreach( t1 => { // topic -> Map[TopicAndPartition, OffsetAndMetadata]
writeShortString(buffer, t1._1) // topic
buffer.putInt(t1._2.size) // number of partitions for this topic
t1._2.foreach( t2 => {
buffer.putInt(t2._1.partition)
buffer.putLong(t2._2.offset)
// version 1 specific data
if (versionId == 1)
buffer.putLong(t2._2.commitTimestamp)
writeShortString(buffer, t2._2.metadata)
})
})
}
override def sizeInBytes =
2 + /* versionId */
4 + /* correlationId */
shortStringLength(clientId) +
shortStringLength(groupId) +
(if (versionId >= 1) 4 /* group generation id */ + shortStringLength(memberId) else 0) +
(if (versionId >= 2) 8 /* retention time */ else 0) +
4 + /* topic count */
requestInfoGroupedByTopic.foldLeft(0)((count, topicAndOffsets) => {
val (topic, offsets) = topicAndOffsets
count +
shortStringLength(topic) + /* topic */
4 + /* number of partitions */
offsets.foldLeft(0)((innerCount, offsetAndMetadata) => {
innerCount +
4 /* partition */ +
8 /* offset */ +
(if (versionId == 1) 8 else 0) /* timestamp */ +
shortStringLength(offsetAndMetadata._2.metadata)
})
})
override def handleError(e: Throwable, requestChannel: RequestChannel, request: RequestChannel.Request): Unit = {
val errorCode = ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]])
val commitStatus = requestInfo.mapValues(_ => errorCode)
val commitResponse = OffsetCommitResponse(commitStatus, correlationId)
requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, commitResponse)))
}
override def describe(details: Boolean): String = {
val offsetCommitRequest = new StringBuilder
offsetCommitRequest.append("Name: " + this.getClass.getSimpleName)
offsetCommitRequest.append("; Version: " + versionId)
offsetCommitRequest.append("; CorrelationId: " + correlationId)
offsetCommitRequest.append("; ClientId: " + clientId)
offsetCommitRequest.append("; GroupId: " + groupId)
offsetCommitRequest.append("; GroupGenerationId: " + groupGenerationId)
offsetCommitRequest.append("; MemberId: " + memberId)
offsetCommitRequest.append("; RetentionMs: " + retentionMs)
if(details)
offsetCommitRequest.append("; RequestInfo: " + requestInfo.mkString(","))
offsetCommitRequest.toString()
}
override def toString = {
describe(details = true)
}
}
| prashantbh/kafka | core/src/main/scala/kafka/api/OffsetCommitRequest.scala | Scala | apache-2.0 | 7,380 |
package org.http4s
package headers
import org.http4s.parser.HttpHeaderParser
import org.http4s.util.{Renderer, Writer}
import scala.concurrent.duration.{FiniteDuration, _}
import scala.util.Try
object Age extends HeaderKey.Internal[Age] with HeaderKey.Singleton {
private class AgeImpl(age: Long) extends Age(age)
def fromLong(age: Long): ParseResult[Age] =
if (age >= 0) {
ParseResult.success(new AgeImpl(age))
} else {
ParseResult.fail("Invalid age value", s"Age param $age must be more or equal to 0 seconds")
}
def unsafeFromDuration(age: FiniteDuration): Age =
fromLong(age.toSeconds).fold(throw _, identity)
def unsafeFromLong(age: Long): Age =
fromLong(age).fold(throw _, identity)
override def parse(s: String): ParseResult[Age] =
HttpHeaderParser.AGE(s)
}
/**
* Constructs an Age header.
*
 * The value of this field is a non-negative number of seconds (in decimal) giving the sender's
 * estimate of the amount of time since the response was generated or validated at the origin server.
*
* @param age age of the response
*/
sealed abstract case class Age(age: Long) extends Header.Parsed {
val key = Age
override val value = Renderer.renderString(age)
override def renderValue(writer: Writer): writer.type = writer.append(value)
def duration: Option[FiniteDuration] = Try(age.seconds).toOption
def unsafeDuration: FiniteDuration = age.seconds
}
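// Usage sketch (values illustrative): Age.fromLong(60) parses successfully and the resulting
// header renders as "Age: 60"; Age.unsafeFromDuration(2.minutes) renders as "Age: 120".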
| reactormonk/http4s | core/src/main/scala/org/http4s/headers/Age.scala | Scala | apache-2.0 | 1,369 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy
import java.io._
import java.lang.reflect.{InvocationTargetException, UndeclaredThrowableException}
import java.net.{URI, URL}
import java.security.PrivilegedExceptionAction
import java.text.ParseException
import java.util.{ServiceLoader, UUID}
import java.util.jar.JarInputStream
import scala.annotation.tailrec
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.util.{Properties, Try}
import org.apache.commons.io.FilenameUtils
import org.apache.commons.lang3.StringUtils
import org.apache.hadoop.conf.{Configuration => HadoopConfiguration}
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.security.UserGroupInformation
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.ivy.Ivy
import org.apache.ivy.core.LogOptions
import org.apache.ivy.core.module.descriptor._
import org.apache.ivy.core.module.id.{ArtifactId, ModuleId, ModuleRevisionId}
import org.apache.ivy.core.report.ResolveReport
import org.apache.ivy.core.resolve.ResolveOptions
import org.apache.ivy.core.retrieve.RetrieveOptions
import org.apache.ivy.core.settings.IvySettings
import org.apache.ivy.plugins.matcher.GlobPatternMatcher
import org.apache.ivy.plugins.repository.file.FileRepository
import org.apache.ivy.plugins.resolver.{ChainResolver, FileSystemResolver, IBiblioResolver}
import org.apache.spark._
import org.apache.spark.api.r.RUtils
import org.apache.spark.deploy.rest._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.UI._
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.util._
/**
* Whether to submit, kill, or request the status of an application.
* The latter two operations are currently supported only for standalone and Mesos cluster modes.
*/
private[deploy] object SparkSubmitAction extends Enumeration {
type SparkSubmitAction = Value
val SUBMIT, KILL, REQUEST_STATUS, PRINT_VERSION = Value
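  // Rough mapping from spark-submit CLI flags to these actions (sketch):
  //   default (an application resource is given) -> SUBMIT
  //   --kill <submissionId>                      -> KILL
  //   --status <submissionId>                    -> REQUEST_STATUS
  //   --version                                  -> PRINT_VERSION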
}
/**
* Main gateway of launching a Spark application.
*
* This program handles setting up the classpath with relevant Spark dependencies and provides
* a layer over the different cluster managers and deploy modes that Spark supports.
*/
private[spark] class SparkSubmit extends Logging {
import DependencyUtils._
import SparkSubmit._
def doSubmit(args: Array[String]): Unit = {
// Initialize logging if it hasn't been done yet. Keep track of whether logging needs to
// be reset before the application starts.
val uninitLog = initializeLogIfNecessary(true, silent = true)
val appArgs = parseArguments(args)
if (appArgs.verbose) {
logInfo(appArgs.toString)
}
appArgs.action match {
case SparkSubmitAction.SUBMIT => submit(appArgs, uninitLog)
case SparkSubmitAction.KILL => kill(appArgs)
case SparkSubmitAction.REQUEST_STATUS => requestStatus(appArgs)
case SparkSubmitAction.PRINT_VERSION => printVersion()
}
}
protected def parseArguments(args: Array[String]): SparkSubmitArguments = {
new SparkSubmitArguments(args)
}
/**
* Kill an existing submission.
*/
private def kill(args: SparkSubmitArguments): Unit = {
if (RestSubmissionClient.supportsRestClient(args.master)) {
new RestSubmissionClient(args.master)
.killSubmission(args.submissionToKill)
} else {
val sparkConf = args.toSparkConf()
sparkConf.set("spark.master", args.master)
SparkSubmitUtils
.getSubmitOperations(args.master)
.kill(args.submissionToKill, sparkConf)
}
}
/**
* Request the status of an existing submission.
*/
private def requestStatus(args: SparkSubmitArguments): Unit = {
if (RestSubmissionClient.supportsRestClient(args.master)) {
new RestSubmissionClient(args.master)
.requestSubmissionStatus(args.submissionToRequestStatusFor)
} else {
val sparkConf = args.toSparkConf()
sparkConf.set("spark.master", args.master)
SparkSubmitUtils
.getSubmitOperations(args.master)
.printSubmissionStatus(args.submissionToRequestStatusFor, sparkConf)
}
}
/** Print version information to the log. */
private def printVersion(): Unit = {
logInfo("""Welcome to
____ __
/ __/__ ___ _____/ /__
_\\ \\/ _ \\/ _ `/ __/ '_/
/___/ .__/\\_,_/_/ /_/\\_\\ version %s
/_/
""".format(SPARK_VERSION))
logInfo("Using Scala %s, %s, %s".format(
Properties.versionString, Properties.javaVmName, Properties.javaVersion))
logInfo(s"Branch $SPARK_BRANCH")
logInfo(s"Compiled by user $SPARK_BUILD_USER on $SPARK_BUILD_DATE")
logInfo(s"Revision $SPARK_REVISION")
logInfo(s"Url $SPARK_REPO_URL")
logInfo("Type --help for more information.")
}
/**
* Submit the application using the provided parameters, ensuring to first wrap
* in a doAs when --proxy-user is specified.
*/
@tailrec
private def submit(args: SparkSubmitArguments, uninitLog: Boolean): Unit = {
def doRunMain(): Unit = {
if (args.proxyUser != null) {
val proxyUser = UserGroupInformation.createProxyUser(args.proxyUser,
UserGroupInformation.getCurrentUser())
try {
proxyUser.doAs(new PrivilegedExceptionAction[Unit]() {
override def run(): Unit = {
runMain(args, uninitLog)
}
})
} catch {
case e: Exception =>
// Hadoop's AuthorizationException suppresses the exception's stack trace, which
// makes the message printed to the output by the JVM not very helpful. Instead,
// detect exceptions with empty stack traces here, and treat them differently.
if (e.getStackTrace().length == 0) {
error(s"ERROR: ${e.getClass().getName()}: ${e.getMessage()}")
} else {
throw e
}
}
} else {
runMain(args, uninitLog)
}
}
// In standalone cluster mode, there are two submission gateways:
// (1) The traditional RPC gateway using o.a.s.deploy.Client as a wrapper
// (2) The new REST-based gateway introduced in Spark 1.3
// The latter is the default behavior as of Spark 1.3, but Spark submit will fail over
// to use the legacy gateway if the master endpoint turns out to be not a REST server.
if (args.isStandaloneCluster && args.useRest) {
try {
logInfo("Running Spark using the REST application submission protocol.")
doRunMain()
} catch {
// Fail over to use the legacy submission gateway
case e: SubmitRestConnectionException =>
logWarning(s"Master endpoint ${args.master} was not a REST server. " +
"Falling back to legacy submission gateway instead.")
args.useRest = false
submit(args, false)
}
// In all other modes, just run the main class as prepared
} else {
doRunMain()
}
}
/**
* Prepare the environment for submitting an application.
*
* @param args the parsed SparkSubmitArguments used for environment preparation.
* @param conf the Hadoop Configuration, this argument will only be set in unit test.
* @return a 4-tuple:
* (1) the arguments for the child process,
* (2) a list of classpath entries for the child,
* (3) a map of system properties, and
* (4) the main class for the child
*
* Exposed for testing.
*/
private[deploy] def prepareSubmitEnvironment(
args: SparkSubmitArguments,
conf: Option[HadoopConfiguration] = None)
: (Seq[String], Seq[String], SparkConf, String) = {
// Return values
val childArgs = new ArrayBuffer[String]()
val childClasspath = new ArrayBuffer[String]()
val sparkConf = args.toSparkConf()
var childMainClass = ""
// Set the cluster manager
val clusterManager: Int = args.master match {
case "yarn" => YARN
case m if m.startsWith("spark") => STANDALONE
case m if m.startsWith("mesos") => MESOS
case m if m.startsWith("k8s") => KUBERNETES
case m if m.startsWith("local") => LOCAL
case _ =>
error("Master must either be yarn or start with spark, mesos, k8s, or local")
-1
}
// Set the deploy mode; default is client mode
var deployMode: Int = args.deployMode match {
case "client" | null => CLIENT
case "cluster" => CLUSTER
case _ =>
error("Deploy mode must be either client or cluster")
-1
}
if (clusterManager == YARN) {
// Make sure YARN is included in our build if we're trying to use it
if (!Utils.classIsLoadable(YARN_CLUSTER_SUBMIT_CLASS) && !Utils.isTesting) {
error(
"Could not load YARN classes. " +
"This copy of Spark may not have been compiled with YARN support.")
}
}
if (clusterManager == KUBERNETES) {
args.master = Utils.checkAndGetK8sMasterUrl(args.master)
// Make sure KUBERNETES is included in our build if we're trying to use it
if (!Utils.classIsLoadable(KUBERNETES_CLUSTER_SUBMIT_CLASS) && !Utils.isTesting) {
error(
"Could not load KUBERNETES classes. " +
"This copy of Spark may not have been compiled with KUBERNETES support.")
}
}
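    // Sketch: a Kubernetes master is normally given as "k8s://https://<apiserver>:<port>"
    // (host name hypothetical); checkAndGetK8sMasterUrl resolves and normalizes that form.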
// Fail fast, the following modes are not supported or applicable
(clusterManager, deployMode) match {
case (STANDALONE, CLUSTER) if args.isPython =>
error("Cluster deploy mode is currently not supported for python " +
"applications on standalone clusters.")
case (STANDALONE, CLUSTER) if args.isR =>
error("Cluster deploy mode is currently not supported for R " +
"applications on standalone clusters.")
case (LOCAL, CLUSTER) =>
error("Cluster deploy mode is not compatible with master \\"local\\"")
case (_, CLUSTER) if isShell(args.primaryResource) =>
error("Cluster deploy mode is not applicable to Spark shells.")
case (_, CLUSTER) if isSqlShell(args.mainClass) =>
error("Cluster deploy mode is not applicable to Spark SQL shell.")
case (_, CLUSTER) if isThriftServer(args.mainClass) =>
error("Cluster deploy mode is not applicable to Spark Thrift server.")
case _ =>
}
// Update args.deployMode if it is null. It will be passed down as a Spark property later.
(args.deployMode, deployMode) match {
case (null, CLIENT) => args.deployMode = "client"
case (null, CLUSTER) => args.deployMode = "cluster"
case _ =>
}
val isYarnCluster = clusterManager == YARN && deployMode == CLUSTER
val isMesosCluster = clusterManager == MESOS && deployMode == CLUSTER
val isStandAloneCluster = clusterManager == STANDALONE && deployMode == CLUSTER
val isKubernetesCluster = clusterManager == KUBERNETES && deployMode == CLUSTER
val isKubernetesClient = clusterManager == KUBERNETES && deployMode == CLIENT
val isKubernetesClusterModeDriver = isKubernetesClient &&
sparkConf.getBoolean("spark.kubernetes.submitInDriver", false)
if (!isMesosCluster && !isStandAloneCluster) {
// Resolve maven dependencies if there are any and add classpath to jars. Add them to py-files
// too for packages that include Python code
val resolvedMavenCoordinates = DependencyUtils.resolveMavenDependencies(
args.packagesExclusions, args.packages, args.repositories, args.ivyRepoPath,
args.ivySettingsPath)
if (!StringUtils.isBlank(resolvedMavenCoordinates)) {
// In K8s client mode, when in the driver, add resolved jars early as we might need
// them at the submit time for artifact downloading.
// For example we might use the dependencies for downloading
// files from a Hadoop Compatible fs eg. S3. In this case the user might pass:
// --packages com.amazonaws:aws-java-sdk:1.7.4:org.apache.hadoop:hadoop-aws:2.7.6
if (isKubernetesClusterModeDriver) {
val loader = getSubmitClassLoader(sparkConf)
for (jar <- resolvedMavenCoordinates.split(",")) {
addJarToClasspath(jar, loader)
}
} else if (isKubernetesCluster) {
// We need this in K8s cluster mode so that we can upload local deps
// via the k8s application, like in cluster mode driver
childClasspath ++= resolvedMavenCoordinates.split(",")
} else {
args.jars = mergeFileLists(args.jars, resolvedMavenCoordinates)
if (args.isPython || isInternal(args.primaryResource)) {
args.pyFiles = mergeFileLists(args.pyFiles, resolvedMavenCoordinates)
}
}
}
// install any R packages that may have been passed through --jars or --packages.
// Spark Packages may contain R source code inside the jar.
if (args.isR && !StringUtils.isBlank(args.jars)) {
RPackageUtils.checkAndBuildRPackage(args.jars, printStream, args.verbose)
}
}
// update spark config from args
args.toSparkConf(Option(sparkConf))
val hadoopConf = conf.getOrElse(SparkHadoopUtil.newConfiguration(sparkConf))
val targetDir = Utils.createTempDir()
// Kerberos is not supported in standalone mode, and keytab support is not yet available
// in Mesos cluster mode.
if (clusterManager != STANDALONE
&& !isMesosCluster
&& args.principal != null
&& args.keytab != null) {
// If client mode, make sure the keytab is just a local path.
if (deployMode == CLIENT && Utils.isLocalUri(args.keytab)) {
args.keytab = new URI(args.keytab).getPath()
}
if (!Utils.isLocalUri(args.keytab)) {
require(new File(args.keytab).exists(), s"Keytab file: ${args.keytab} does not exist")
UserGroupInformation.loginUserFromKeytab(args.principal, args.keytab)
}
}
// Resolve glob path for different resources.
args.jars = Option(args.jars).map(resolveGlobPaths(_, hadoopConf)).orNull
args.files = Option(args.files).map(resolveGlobPaths(_, hadoopConf)).orNull
args.pyFiles = Option(args.pyFiles).map(resolveGlobPaths(_, hadoopConf)).orNull
args.archives = Option(args.archives).map(resolveGlobPaths(_, hadoopConf)).orNull
lazy val secMgr = new SecurityManager(sparkConf)
// In client mode, download remote files.
var localPrimaryResource: String = null
var localJars: String = null
var localPyFiles: String = null
if (deployMode == CLIENT) {
localPrimaryResource = Option(args.primaryResource).map {
downloadFile(_, targetDir, sparkConf, hadoopConf, secMgr)
}.orNull
localJars = Option(args.jars).map {
downloadFileList(_, targetDir, sparkConf, hadoopConf, secMgr)
}.orNull
localPyFiles = Option(args.pyFiles).map {
downloadFileList(_, targetDir, sparkConf, hadoopConf, secMgr)
}.orNull
if (isKubernetesClusterModeDriver) {
// Replace with the downloaded local jar path to avoid propagating hadoop compatible uris.
// Executors will get the jars from the Spark file server.
// Explicitly download the related files here
args.jars = renameResourcesToLocalFS(args.jars, localJars)
val localFiles = Option(args.files).map {
downloadFileList(_, targetDir, sparkConf, hadoopConf, secMgr)
}.orNull
args.files = renameResourcesToLocalFS(args.files, localFiles)
}
}
// When running in YARN, for some remote resources with scheme:
// 1. Hadoop FileSystem doesn't support them.
// 2. We explicitly bypass Hadoop FileSystem with "spark.yarn.dist.forceDownloadSchemes".
// We will download them to local disk prior to add to YARN's distributed cache.
// For yarn client mode, since we already download them with above code, so we only need to
// figure out the local path and replace the remote one.
if (clusterManager == YARN) {
val forceDownloadSchemes = sparkConf.get(FORCE_DOWNLOAD_SCHEMES)
def shouldDownload(scheme: String): Boolean = {
forceDownloadSchemes.contains("*") || forceDownloadSchemes.contains(scheme) ||
Try { FileSystem.getFileSystemClass(scheme, hadoopConf) }.isFailure
}
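      // Example (scheme list illustrative): with spark.yarn.dist.forceDownloadSchemes=http,https
      // (or "*"), an http:// jar is first downloaded into targetDir and the local copy is handed
      // to YARN's distributed cache instead of the remote URI.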
def downloadResource(resource: String): String = {
val uri = Utils.resolveURI(resource)
uri.getScheme match {
case "local" | "file" => resource
case e if shouldDownload(e) =>
val file = new File(targetDir, new Path(uri).getName)
if (file.exists()) {
file.toURI.toString
} else {
downloadFile(resource, targetDir, sparkConf, hadoopConf, secMgr)
}
case _ => uri.toString
}
}
args.primaryResource = Option(args.primaryResource).map { downloadResource }.orNull
args.files = Option(args.files).map { files =>
Utils.stringToSeq(files).map(downloadResource).mkString(",")
}.orNull
args.pyFiles = Option(args.pyFiles).map { pyFiles =>
Utils.stringToSeq(pyFiles).map(downloadResource).mkString(",")
}.orNull
args.jars = Option(args.jars).map { jars =>
Utils.stringToSeq(jars).map(downloadResource).mkString(",")
}.orNull
args.archives = Option(args.archives).map { archives =>
Utils.stringToSeq(archives).map(downloadResource).mkString(",")
}.orNull
}
// At this point, we have attempted to download all remote resources.
// Now we try to resolve the main class if our primary resource is a JAR.
if (args.mainClass == null && !args.isPython && !args.isR) {
try {
val uri = new URI(
Option(localPrimaryResource).getOrElse(args.primaryResource)
)
val fs = FileSystem.get(uri, hadoopConf)
Utils.tryWithResource(new JarInputStream(fs.open(new Path(uri)))) { jar =>
args.mainClass = jar.getManifest.getMainAttributes.getValue("Main-Class")
}
} catch {
case e: Throwable =>
error(
s"Failed to get main class in JAR with error '${e.getMessage}'. " +
" Please specify one with --class."
)
}
if (args.mainClass == null) {
// If we still can't figure out the main class at this point, blow up.
error("No main class set in JAR; please specify one with --class.")
}
}
// If we're running a python app, set the main class to our specific python runner
if (args.isPython && deployMode == CLIENT) {
if (args.primaryResource == PYSPARK_SHELL) {
args.mainClass = "org.apache.spark.api.python.PythonGatewayServer"
} else {
// If a python file is provided, add it to the child arguments and list of files to deploy.
// Usage: PythonAppRunner <main python file> <extra python files> [app arguments]
args.mainClass = "org.apache.spark.deploy.PythonRunner"
args.childArgs = ArrayBuffer(localPrimaryResource, localPyFiles) ++ args.childArgs
}
}
// Non-PySpark applications can need Python dependencies.
if (deployMode == CLIENT && clusterManager != YARN) {
// The YARN backend handles python files differently, so don't merge the lists.
args.files = mergeFileLists(args.files, args.pyFiles)
}
if (localPyFiles != null) {
sparkConf.set(SUBMIT_PYTHON_FILES, localPyFiles.split(",").toSeq)
}
// In YARN mode for an R app, add the SparkR package archive and the R package
// archive containing all of the built R libraries to archives so that they can
// be distributed with the job
if (args.isR && clusterManager == YARN) {
val sparkRPackagePath = RUtils.localSparkRPackagePath
if (sparkRPackagePath.isEmpty) {
error("SPARK_HOME does not exist for R application in YARN mode.")
}
val sparkRPackageFile = new File(sparkRPackagePath.get, SPARKR_PACKAGE_ARCHIVE)
if (!sparkRPackageFile.exists()) {
error(s"$SPARKR_PACKAGE_ARCHIVE does not exist for R application in YARN mode.")
}
val sparkRPackageURI = Utils.resolveURI(sparkRPackageFile.getAbsolutePath).toString
// Distribute the SparkR package.
// Assigns a symbol link name "sparkr" to the shipped package.
args.archives = mergeFileLists(args.archives, sparkRPackageURI + "#sparkr")
// Distribute the R package archive containing all the built R packages.
if (!RUtils.rPackages.isEmpty) {
val rPackageFile =
RPackageUtils.zipRLibraries(new File(RUtils.rPackages.get), R_PACKAGE_ARCHIVE)
if (!rPackageFile.exists()) {
error("Failed to zip all the built R packages.")
}
val rPackageURI = Utils.resolveURI(rPackageFile.getAbsolutePath).toString
// Assigns a symbol link name "rpkg" to the shipped package.
args.archives = mergeFileLists(args.archives, rPackageURI + "#rpkg")
}
}
// TODO: Support distributing R packages with standalone cluster
if (args.isR && clusterManager == STANDALONE && !RUtils.rPackages.isEmpty) {
error("Distributing R packages with standalone cluster is not supported.")
}
// TODO: Support distributing R packages with mesos cluster
if (args.isR && clusterManager == MESOS && !RUtils.rPackages.isEmpty) {
error("Distributing R packages with mesos cluster is not supported.")
}
// If we're running an R app, set the main class to our specific R runner
if (args.isR && deployMode == CLIENT) {
if (args.primaryResource == SPARKR_SHELL) {
args.mainClass = "org.apache.spark.api.r.RBackend"
} else {
// If an R file is provided, add it to the child arguments and list of files to deploy.
// Usage: RRunner <main R file> [app arguments]
args.mainClass = "org.apache.spark.deploy.RRunner"
args.childArgs = ArrayBuffer(localPrimaryResource) ++ args.childArgs
args.files = mergeFileLists(args.files, args.primaryResource)
}
}
if (isYarnCluster && args.isR) {
// In yarn-cluster mode for an R app, add primary resource to files
// that can be distributed with the job
args.files = mergeFileLists(args.files, args.primaryResource)
}
// Special flag to avoid deprecation warnings at the client
sys.props("SPARK_SUBMIT") = "true"
// A list of rules to map each argument to system properties or command-line options in
// each deploy mode; we iterate through these below
val options = List[OptionAssigner](
// All cluster managers
OptionAssigner(args.master, ALL_CLUSTER_MGRS, ALL_DEPLOY_MODES, confKey = "spark.master"),
OptionAssigner(args.deployMode, ALL_CLUSTER_MGRS, ALL_DEPLOY_MODES,
confKey = SUBMIT_DEPLOY_MODE.key),
OptionAssigner(args.name, ALL_CLUSTER_MGRS, ALL_DEPLOY_MODES, confKey = "spark.app.name"),
OptionAssigner(args.ivyRepoPath, ALL_CLUSTER_MGRS, CLIENT, confKey = "spark.jars.ivy"),
OptionAssigner(args.driverMemory, ALL_CLUSTER_MGRS, CLIENT,
confKey = DRIVER_MEMORY.key),
OptionAssigner(args.driverExtraClassPath, ALL_CLUSTER_MGRS, ALL_DEPLOY_MODES,
confKey = DRIVER_CLASS_PATH.key),
OptionAssigner(args.driverExtraJavaOptions, ALL_CLUSTER_MGRS, ALL_DEPLOY_MODES,
confKey = DRIVER_JAVA_OPTIONS.key),
OptionAssigner(args.driverExtraLibraryPath, ALL_CLUSTER_MGRS, ALL_DEPLOY_MODES,
confKey = DRIVER_LIBRARY_PATH.key),
OptionAssigner(args.principal, ALL_CLUSTER_MGRS, ALL_DEPLOY_MODES,
confKey = PRINCIPAL.key),
OptionAssigner(args.keytab, ALL_CLUSTER_MGRS, ALL_DEPLOY_MODES,
confKey = KEYTAB.key),
OptionAssigner(args.pyFiles, ALL_CLUSTER_MGRS, CLUSTER, confKey = SUBMIT_PYTHON_FILES.key),
// Propagate attributes for dependency resolution at the driver side
OptionAssigner(args.packages, STANDALONE | MESOS | KUBERNETES,
CLUSTER, confKey = "spark.jars.packages"),
OptionAssigner(args.repositories, STANDALONE | MESOS | KUBERNETES,
CLUSTER, confKey = "spark.jars.repositories"),
OptionAssigner(args.ivyRepoPath, STANDALONE | MESOS | KUBERNETES,
CLUSTER, confKey = "spark.jars.ivy"),
OptionAssigner(args.packagesExclusions, STANDALONE | MESOS | KUBERNETES,
CLUSTER, confKey = "spark.jars.excludes"),
// Yarn only
OptionAssigner(args.queue, YARN, ALL_DEPLOY_MODES, confKey = "spark.yarn.queue"),
OptionAssigner(args.pyFiles, YARN, ALL_DEPLOY_MODES, confKey = "spark.yarn.dist.pyFiles",
mergeFn = Some(mergeFileLists(_, _))),
OptionAssigner(args.jars, YARN, ALL_DEPLOY_MODES, confKey = "spark.yarn.dist.jars",
mergeFn = Some(mergeFileLists(_, _))),
OptionAssigner(args.files, YARN, ALL_DEPLOY_MODES, confKey = "spark.yarn.dist.files",
mergeFn = Some(mergeFileLists(_, _))),
OptionAssigner(args.archives, YARN, ALL_DEPLOY_MODES, confKey = "spark.yarn.dist.archives",
mergeFn = Some(mergeFileLists(_, _))),
// Other options
OptionAssigner(args.numExecutors, YARN | KUBERNETES, ALL_DEPLOY_MODES,
confKey = EXECUTOR_INSTANCES.key),
OptionAssigner(args.executorCores, STANDALONE | YARN | KUBERNETES, ALL_DEPLOY_MODES,
confKey = EXECUTOR_CORES.key),
OptionAssigner(args.executorMemory, STANDALONE | MESOS | YARN | KUBERNETES, ALL_DEPLOY_MODES,
confKey = EXECUTOR_MEMORY.key),
OptionAssigner(args.totalExecutorCores, STANDALONE | MESOS | KUBERNETES, ALL_DEPLOY_MODES,
confKey = CORES_MAX.key),
OptionAssigner(args.files, LOCAL | STANDALONE | MESOS | KUBERNETES, ALL_DEPLOY_MODES,
confKey = FILES.key),
OptionAssigner(args.jars, LOCAL, CLIENT, confKey = JARS.key),
OptionAssigner(args.jars, STANDALONE | MESOS | KUBERNETES, ALL_DEPLOY_MODES,
confKey = JARS.key),
OptionAssigner(args.driverMemory, STANDALONE | MESOS | YARN | KUBERNETES, CLUSTER,
confKey = DRIVER_MEMORY.key),
OptionAssigner(args.driverCores, STANDALONE | MESOS | YARN | KUBERNETES, CLUSTER,
confKey = DRIVER_CORES.key),
OptionAssigner(args.supervise.toString, STANDALONE | MESOS, CLUSTER,
confKey = DRIVER_SUPERVISE.key),
OptionAssigner(args.ivyRepoPath, STANDALONE, CLUSTER, confKey = "spark.jars.ivy"),
// An internal option used only for spark-shell to add user jars to repl's classloader,
// previously it uses "spark.jars" or "spark.yarn.dist.jars" which now may be pointed to
// remote jars, so adding a new option to only specify local jars for spark-shell internally.
OptionAssigner(localJars, ALL_CLUSTER_MGRS, CLIENT, confKey = "spark.repl.local.jars")
)
// In client mode, launch the application main class directly
// In addition, add the main application jar and any added jars (if any) to the classpath
if (deployMode == CLIENT) {
childMainClass = args.mainClass
if (localPrimaryResource != null && isUserJar(localPrimaryResource)) {
childClasspath += localPrimaryResource
}
if (localJars != null) { childClasspath ++= localJars.split(",") }
}
    // Add the main application jar and any added jars to the classpath in case the YARN client
    // requires these jars.
    // This assumes both the primary resource and the user jars are local jars, or have already
    // been downloaded locally by configuring "spark.yarn.dist.forceDownloadSchemes"; otherwise
    // they will not be added to the classpath of the YARN client.
if (isYarnCluster) {
if (isUserJar(args.primaryResource)) {
childClasspath += args.primaryResource
}
if (args.jars != null) { childClasspath ++= args.jars.split(",") }
}
if (deployMode == CLIENT) {
if (args.childArgs != null) { childArgs ++= args.childArgs }
}
// Map all arguments to command-line options or system properties for our chosen mode
for (opt <- options) {
if (opt.value != null &&
(deployMode & opt.deployMode) != 0 &&
(clusterManager & opt.clusterManager) != 0) {
if (opt.clOption != null) { childArgs += (opt.clOption, opt.value) }
if (opt.confKey != null) {
if (opt.mergeFn.isDefined && sparkConf.contains(opt.confKey)) {
sparkConf.set(opt.confKey, opt.mergeFn.get.apply(sparkConf.get(opt.confKey), opt.value))
} else {
sparkConf.set(opt.confKey, opt.value)
}
}
}
}
    // For shells, spark.ui.showConsoleProgress is true by default unless the user has set it explicitly.
if (isShell(args.primaryResource) && !sparkConf.contains(UI_SHOW_CONSOLE_PROGRESS)) {
sparkConf.set(UI_SHOW_CONSOLE_PROGRESS, true)
}
// Add the application jar automatically so the user doesn't have to call sc.addJar
// For YARN cluster mode, the jar is already distributed on each node as "app.jar"
// For python and R files, the primary resource is already distributed as a regular file
if (!isYarnCluster && !args.isPython && !args.isR) {
var jars = sparkConf.get(JARS)
if (isUserJar(args.primaryResource)) {
jars = jars ++ Seq(args.primaryResource)
}
sparkConf.set(JARS, jars)
}
// In standalone cluster mode, use the REST client to submit the application (Spark 1.3+).
// All Spark parameters are expected to be passed to the client through system properties.
if (args.isStandaloneCluster) {
if (args.useRest) {
childMainClass = REST_CLUSTER_SUBMIT_CLASS
childArgs += (args.primaryResource, args.mainClass)
} else {
// In legacy standalone cluster mode, use Client as a wrapper around the user class
childMainClass = STANDALONE_CLUSTER_SUBMIT_CLASS
if (args.supervise) { childArgs += "--supervise" }
Option(args.driverMemory).foreach { m => childArgs += ("--memory", m) }
Option(args.driverCores).foreach { c => childArgs += ("--cores", c) }
childArgs += "launch"
childArgs += (args.master, args.primaryResource, args.mainClass)
}
if (args.childArgs != null) {
childArgs ++= args.childArgs
}
}
// Let YARN know it's a pyspark app, so it distributes needed libraries.
if (clusterManager == YARN) {
if (args.isPython) {
sparkConf.set("spark.yarn.isPython", "true")
}
}
if ((clusterManager == MESOS || clusterManager == KUBERNETES)
&& UserGroupInformation.isSecurityEnabled) {
setRMPrincipal(sparkConf)
}
// In yarn-cluster mode, use yarn.Client as a wrapper around the user class
if (isYarnCluster) {
childMainClass = YARN_CLUSTER_SUBMIT_CLASS
if (args.isPython) {
childArgs += ("--primary-py-file", args.primaryResource)
childArgs += ("--class", "org.apache.spark.deploy.PythonRunner")
} else if (args.isR) {
val mainFile = new Path(args.primaryResource).getName
childArgs += ("--primary-r-file", mainFile)
childArgs += ("--class", "org.apache.spark.deploy.RRunner")
} else {
if (args.primaryResource != SparkLauncher.NO_RESOURCE) {
childArgs += ("--jar", args.primaryResource)
}
childArgs += ("--class", args.mainClass)
}
if (args.childArgs != null) {
args.childArgs.foreach { arg => childArgs += ("--arg", arg) }
}
}
if (isMesosCluster) {
assert(args.useRest, "Mesos cluster mode is only supported through the REST submission API")
childMainClass = REST_CLUSTER_SUBMIT_CLASS
if (args.isPython) {
// Second argument is main class
childArgs += (args.primaryResource, "")
if (args.pyFiles != null) {
sparkConf.set(SUBMIT_PYTHON_FILES, args.pyFiles.split(",").toSeq)
}
} else if (args.isR) {
// Second argument is main class
childArgs += (args.primaryResource, "")
} else {
childArgs += (args.primaryResource, args.mainClass)
}
if (args.childArgs != null) {
childArgs ++= args.childArgs
}
}
if (isKubernetesCluster) {
childMainClass = KUBERNETES_CLUSTER_SUBMIT_CLASS
if (args.primaryResource != SparkLauncher.NO_RESOURCE) {
if (args.isPython) {
childArgs ++= Array("--primary-py-file", args.primaryResource)
childArgs ++= Array("--main-class", "org.apache.spark.deploy.PythonRunner")
} else if (args.isR) {
childArgs ++= Array("--primary-r-file", args.primaryResource)
childArgs ++= Array("--main-class", "org.apache.spark.deploy.RRunner")
}
else {
childArgs ++= Array("--primary-java-resource", args.primaryResource)
childArgs ++= Array("--main-class", args.mainClass)
}
} else {
childArgs ++= Array("--main-class", args.mainClass)
}
if (args.childArgs != null) {
args.childArgs.foreach { arg =>
childArgs += ("--arg", arg)
}
}
// Pass the proxyUser to the k8s app so it is possible to add it to the driver args
if (args.proxyUser != null) {
childArgs += ("--proxy-user", args.proxyUser)
}
}
// Load any properties specified through --conf and the default properties file
for ((k, v) <- args.sparkProperties) {
sparkConf.setIfMissing(k, v)
}
// Ignore invalid spark.driver.host in cluster modes.
if (deployMode == CLUSTER) {
sparkConf.remove(DRIVER_HOST_ADDRESS)
}
// Resolve paths in certain spark properties
val pathConfigs = Seq(
JARS.key,
FILES.key,
"spark.yarn.dist.files",
"spark.yarn.dist.archives",
"spark.yarn.dist.jars")
pathConfigs.foreach { config =>
// Replace old URIs with resolved URIs, if they exist
sparkConf.getOption(config).foreach { oldValue =>
sparkConf.set(config, Utils.resolveURIs(oldValue))
}
}
// Resolve and format python file paths properly before adding them to the PYTHONPATH.
// The resolving part is redundant in the case of --py-files, but necessary if the user
// explicitly sets `spark.submit.pyFiles` in his/her default properties file.
val pyFiles = sparkConf.get(SUBMIT_PYTHON_FILES)
val resolvedPyFiles = Utils.resolveURIs(pyFiles.mkString(","))
val formattedPyFiles = if (deployMode != CLUSTER) {
PythonRunner.formatPaths(resolvedPyFiles).mkString(",")
} else {
      // Skip formatting the python path in yarn and mesos cluster mode; these two modes support
      // remote python files and can distribute and add the python files locally themselves.
resolvedPyFiles
}
sparkConf.set(SUBMIT_PYTHON_FILES, formattedPyFiles.split(",").toSeq)
(childArgs.toSeq, childClasspath.toSeq, sparkConf, childMainClass)
}
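  // Replaces each remote resource URI with the locally downloaded URI that has the same file
  // name; entries without a local counterpart are kept unchanged.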
private def renameResourcesToLocalFS(resources: String, localResources: String): String = {
if (resources != null && localResources != null) {
val localResourcesSeq = Utils.stringToSeq(localResources)
Utils.stringToSeq(resources).map { resource =>
val filenameRemote = FilenameUtils.getName(new URI(resource).getPath)
localResourcesSeq.find { localUri =>
val filenameLocal = FilenameUtils.getName(new URI(localUri).getPath)
filenameRemote == filenameLocal
}.getOrElse(resource)
}.mkString(",")
} else {
resources
}
}
// [SPARK-20328]. HadoopRDD calls into a Hadoop library that fetches delegation tokens with
// renewer set to the YARN ResourceManager. Since YARN isn't configured in Mesos or Kubernetes
// mode, we must trick it into thinking we're YARN.
private def setRMPrincipal(sparkConf: SparkConf): Unit = {
val shortUserName = UserGroupInformation.getCurrentUser.getShortUserName
val key = s"spark.hadoop.${YarnConfiguration.RM_PRINCIPAL}"
logInfo(s"Setting ${key} to ${shortUserName}")
sparkConf.set(key, shortUserName)
}
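  // Builds the classloader used to load the child main class, honoring
  // spark.driver.userClassPathFirst, and installs it as the thread's context classloader.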
private def getSubmitClassLoader(sparkConf: SparkConf): MutableURLClassLoader = {
val loader =
if (sparkConf.get(DRIVER_USER_CLASS_PATH_FIRST)) {
new ChildFirstURLClassLoader(new Array[URL](0),
Thread.currentThread.getContextClassLoader)
} else {
new MutableURLClassLoader(new Array[URL](0),
Thread.currentThread.getContextClassLoader)
}
Thread.currentThread.setContextClassLoader(loader)
loader
}
/**
* Run the main method of the child class using the submit arguments.
*
* This runs in two steps. First, we prepare the launch environment by setting up
* the appropriate classpath, system properties, and application arguments for
* running the child main class based on the cluster manager and the deploy mode.
* Second, we use this launch environment to invoke the main method of the child
* main class.
*
* Note that this main class will not be the one provided by the user if we're
* running cluster deploy mode or python applications.
*/
private def runMain(args: SparkSubmitArguments, uninitLog: Boolean): Unit = {
val (childArgs, childClasspath, sparkConf, childMainClass) = prepareSubmitEnvironment(args)
// Let the main class re-initialize the logging system once it starts.
if (uninitLog) {
Logging.uninitialize()
}
if (args.verbose) {
logInfo(s"Main class:\\n$childMainClass")
logInfo(s"Arguments:\\n${childArgs.mkString("\\n")}")
// sysProps may contain sensitive information, so redact before printing
logInfo(s"Spark config:\\n${Utils.redact(sparkConf.getAll.toMap).mkString("\\n")}")
logInfo(s"Classpath elements:\\n${childClasspath.mkString("\\n")}")
logInfo("\\n")
}
val loader = getSubmitClassLoader(sparkConf)
for (jar <- childClasspath) {
addJarToClasspath(jar, loader)
}
var mainClass: Class[_] = null
try {
mainClass = Utils.classForName(childMainClass)
} catch {
case e: ClassNotFoundException =>
logError(s"Failed to load class $childMainClass.")
if (childMainClass.contains("thriftserver")) {
logInfo(s"Failed to load main class $childMainClass.")
logInfo("You need to build Spark with -Phive and -Phive-thriftserver.")
}
throw new SparkUserAppException(CLASS_NOT_FOUND_EXIT_STATUS)
case e: NoClassDefFoundError =>
logError(s"Failed to load $childMainClass: ${e.getMessage()}")
if (e.getMessage.contains("org/apache/hadoop/hive")) {
logInfo(s"Failed to load hive class.")
logInfo("You need to build Spark with -Phive and -Phive-thriftserver.")
}
throw new SparkUserAppException(CLASS_NOT_FOUND_EXIT_STATUS)
}
val app: SparkApplication = if (classOf[SparkApplication].isAssignableFrom(mainClass)) {
mainClass.getConstructor().newInstance().asInstanceOf[SparkApplication]
} else {
new JavaMainApplication(mainClass)
}
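    // Unwrap reflection and proxy wrappers so the caller sees the underlying application exception.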
@tailrec
def findCause(t: Throwable): Throwable = t match {
case e: UndeclaredThrowableException =>
if (e.getCause() != null) findCause(e.getCause()) else e
case e: InvocationTargetException =>
if (e.getCause() != null) findCause(e.getCause()) else e
case e: Throwable =>
e
}
try {
app.start(childArgs.toArray, sparkConf)
} catch {
case t: Throwable =>
throw findCause(t)
}
}
/** Throw a SparkException with the given error message. */
private def error(msg: String): Unit = throw new SparkException(msg)
}
/**
* This entry point is used by the launcher library to start in-process Spark applications.
*/
private[spark] object InProcessSparkSubmit {
def main(args: Array[String]): Unit = {
val submit = new SparkSubmit()
submit.doSubmit(args)
}
}
object SparkSubmit extends CommandLineUtils with Logging {
// Cluster managers
private val YARN = 1
private val STANDALONE = 2
private val MESOS = 4
private val LOCAL = 8
private val KUBERNETES = 16
private val ALL_CLUSTER_MGRS = YARN | STANDALONE | MESOS | LOCAL | KUBERNETES
// Deploy modes
private val CLIENT = 1
private val CLUSTER = 2
private val ALL_DEPLOY_MODES = CLIENT | CLUSTER
// Special primary resource names that represent shells rather than application jars.
private val SPARK_SHELL = "spark-shell"
private val PYSPARK_SHELL = "pyspark-shell"
private val SPARKR_SHELL = "sparkr-shell"
private val SPARKR_PACKAGE_ARCHIVE = "sparkr.zip"
private val R_PACKAGE_ARCHIVE = "rpkg.zip"
private val CLASS_NOT_FOUND_EXIT_STATUS = 101
  // The following constants are visible for testing.
private[deploy] val YARN_CLUSTER_SUBMIT_CLASS =
"org.apache.spark.deploy.yarn.YarnClusterApplication"
private[deploy] val REST_CLUSTER_SUBMIT_CLASS = classOf[RestSubmissionClientApp].getName()
private[deploy] val STANDALONE_CLUSTER_SUBMIT_CLASS = classOf[ClientApp].getName()
private[deploy] val KUBERNETES_CLUSTER_SUBMIT_CLASS =
"org.apache.spark.deploy.k8s.submit.KubernetesClientApplication"
override def main(args: Array[String]): Unit = {
val submit = new SparkSubmit() {
self =>
override protected def parseArguments(args: Array[String]): SparkSubmitArguments = {
new SparkSubmitArguments(args) {
override protected def logInfo(msg: => String): Unit = self.logInfo(msg)
override protected def logWarning(msg: => String): Unit = self.logWarning(msg)
override protected def logError(msg: => String): Unit = self.logError(msg)
}
}
override protected def logInfo(msg: => String): Unit = printMessage(msg)
override protected def logWarning(msg: => String): Unit = printMessage(s"Warning: $msg")
override protected def logError(msg: => String): Unit = printMessage(s"Error: $msg")
override def doSubmit(args: Array[String]): Unit = {
try {
super.doSubmit(args)
} catch {
case e: SparkUserAppException =>
exitFn(e.exitCode)
}
}
}
submit.doSubmit(args)
}
/**
* Return whether the given primary resource represents a user jar.
*/
private[deploy] def isUserJar(res: String): Boolean = {
!isShell(res) && !isPython(res) && !isInternal(res) && !isR(res)
}
/**
* Return whether the given primary resource represents a shell.
*/
private[deploy] def isShell(res: String): Boolean = {
(res == SPARK_SHELL || res == PYSPARK_SHELL || res == SPARKR_SHELL)
}
/**
* Return whether the given main class represents a sql shell.
*/
private[deploy] def isSqlShell(mainClass: String): Boolean = {
mainClass == "org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver"
}
/**
* Return whether the given main class represents a thrift server.
*/
private def isThriftServer(mainClass: String): Boolean = {
mainClass == "org.apache.spark.sql.hive.thriftserver.HiveThriftServer2"
}
/**
* Return whether the given primary resource requires running python.
*/
private[deploy] def isPython(res: String): Boolean = {
res != null && res.endsWith(".py") || res == PYSPARK_SHELL
}
/**
* Return whether the given primary resource requires running R.
*/
private[deploy] def isR(res: String): Boolean = {
res != null && (res.endsWith(".R") || res.endsWith(".r")) || res == SPARKR_SHELL
}
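  /**
   * Return whether the given primary resource is the internal "no resource" placeholder.
   */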
private[deploy] def isInternal(res: String): Boolean = {
res == SparkLauncher.NO_RESOURCE
}
}
/** Provides utility functions to be used inside SparkSubmit. */
private[spark] object SparkSubmitUtils {
// Exposed for testing
var printStream = SparkSubmit.printStream
// Exposed for testing.
// These components are used to make the default exclusion rules for Spark dependencies.
// We need to specify each component explicitly, otherwise we miss
// spark-streaming utility components. Underscore is there to differentiate between
// spark-streaming_2.1x and spark-streaming-kafka-0-10-assembly_2.1x
val IVY_DEFAULT_EXCLUDES = Seq("catalyst_", "core_", "graphx_", "kvstore_", "launcher_", "mllib_",
"mllib-local_", "network-common_", "network-shuffle_", "repl_", "sketch_", "sql_", "streaming_",
"tags_", "unsafe_")
/**
* Represents a Maven Coordinate
* @param groupId the groupId of the coordinate
* @param artifactId the artifactId of the coordinate
* @param version the version of the coordinate
*/
private[deploy] case class MavenCoordinate(groupId: String, artifactId: String, version: String) {
override def toString: String = s"$groupId:$artifactId:$version"
}
/**
* Extracts maven coordinates from a comma-delimited string. Coordinates should be provided
* in the format `groupId:artifactId:version` or `groupId/artifactId:version`.
* @param coordinates Comma-delimited string of maven coordinates
* @return Sequence of Maven coordinates
*/
def extractMavenCoordinates(coordinates: String): Seq[MavenCoordinate] = {
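    // e.g. "org.apache.spark:spark-sql_2.12:3.0.0" parses to
    // MavenCoordinate("org.apache.spark", "spark-sql_2.12", "3.0.0") (illustrative input)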
coordinates.split(",").map { p =>
val splits = p.replace("/", ":").split(":")
require(splits.length == 3, s"Provided Maven Coordinates must be in the form " +
s"'groupId:artifactId:version'. The coordinate provided is: $p")
require(splits(0) != null && splits(0).trim.nonEmpty, s"The groupId cannot be null or " +
s"be whitespace. The groupId provided is: ${splits(0)}")
require(splits(1) != null && splits(1).trim.nonEmpty, s"The artifactId cannot be null or " +
s"be whitespace. The artifactId provided is: ${splits(1)}")
require(splits(2) != null && splits(2).trim.nonEmpty, s"The version cannot be null or " +
s"be whitespace. The version provided is: ${splits(2)}")
new MavenCoordinate(splits(0), splits(1), splits(2))
}
}
/** Path of the local Maven cache. */
private[spark] def m2Path: File = {
if (Utils.isTesting) {
// test builds delete the maven cache, and this can cause flakiness
new File("dummy", ".m2" + File.separator + "repository")
} else {
new File(System.getProperty("user.home"), ".m2" + File.separator + "repository")
}
}
/**
   * Creates the default chain of repository resolvers (local M2 cache, local Ivy cache,
   * Maven Central and the spark-packages repository)
* @param defaultIvyUserDir The default user path for Ivy
* @return A ChainResolver used by Ivy to search for and resolve dependencies.
*/
def createRepoResolvers(defaultIvyUserDir: File): ChainResolver = {
// We need a chain resolver if we want to check multiple repositories
val cr = new ChainResolver
cr.setName("spark-list")
val localM2 = new IBiblioResolver
localM2.setM2compatible(true)
localM2.setRoot(m2Path.toURI.toString)
localM2.setUsepoms(true)
localM2.setName("local-m2-cache")
cr.add(localM2)
val localIvy = new FileSystemResolver
val localIvyRoot = new File(defaultIvyUserDir, "local")
localIvy.setLocal(true)
localIvy.setRepository(new FileRepository(localIvyRoot))
val ivyPattern = Seq(localIvyRoot.getAbsolutePath, "[organisation]", "[module]", "[revision]",
"ivys", "ivy.xml").mkString(File.separator)
localIvy.addIvyPattern(ivyPattern)
val artifactPattern = Seq(localIvyRoot.getAbsolutePath, "[organisation]", "[module]",
"[revision]", "[type]s", "[artifact](-[classifier]).[ext]").mkString(File.separator)
localIvy.addArtifactPattern(artifactPattern)
localIvy.setName("local-ivy-cache")
cr.add(localIvy)
// the biblio resolver resolves POM declared dependencies
val br: IBiblioResolver = new IBiblioResolver
br.setM2compatible(true)
br.setUsepoms(true)
br.setName("central")
cr.add(br)
val sp: IBiblioResolver = new IBiblioResolver
sp.setM2compatible(true)
sp.setUsepoms(true)
sp.setRoot("https://dl.bintray.com/spark-packages/maven")
sp.setName("spark-packages")
cr.add(sp)
cr
}
/**
* Output a comma-delimited list of paths for the downloaded jars to be added to the classpath
* (will append to jars in SparkSubmit).
* @param artifacts Sequence of dependencies that were resolved and retrieved
* @param cacheDirectory directory where jars are cached
* @return a comma-delimited list of paths for the dependencies
*/
def resolveDependencyPaths(
artifacts: Array[AnyRef],
cacheDirectory: File): String = {
artifacts.map { artifactInfo =>
val artifact = artifactInfo.asInstanceOf[Artifact].getModuleRevisionId
cacheDirectory.getAbsolutePath + File.separator +
s"${artifact.getOrganisation}_${artifact.getName}-${artifact.getRevision}.jar"
}.mkString(",")
}
/** Adds the given maven coordinates to Ivy's module descriptor. */
def addDependenciesToIvy(
md: DefaultModuleDescriptor,
artifacts: Seq[MavenCoordinate],
ivyConfName: String): Unit = {
artifacts.foreach { mvn =>
val ri = ModuleRevisionId.newInstance(mvn.groupId, mvn.artifactId, mvn.version)
val dd = new DefaultDependencyDescriptor(ri, false, false)
dd.addDependencyConfiguration(ivyConfName, ivyConfName + "(runtime)")
// scalastyle:off println
printStream.println(s"${dd.getDependencyId} added as a dependency")
// scalastyle:on println
md.addDependency(dd)
}
}
/** Add exclusion rules for dependencies already included in the spark-assembly */
def addExclusionRules(
ivySettings: IvySettings,
ivyConfName: String,
md: DefaultModuleDescriptor): Unit = {
// Add scala exclusion rule
md.addExcludeRule(createExclusion("*:scala-library:*", ivySettings, ivyConfName))
IVY_DEFAULT_EXCLUDES.foreach { comp =>
md.addExcludeRule(createExclusion(s"org.apache.spark:spark-$comp*:*", ivySettings,
ivyConfName))
}
}
/**
* Build Ivy Settings using options with default resolvers
* @param remoteRepos Comma-delimited string of remote repositories other than maven central
* @param ivyPath The path to the local ivy repository
* @return An IvySettings object
*/
def buildIvySettings(remoteRepos: Option[String], ivyPath: Option[String]): IvySettings = {
val ivySettings: IvySettings = new IvySettings
processIvyPathArg(ivySettings, ivyPath)
// create a pattern matcher
ivySettings.addMatcher(new GlobPatternMatcher)
// create the dependency resolvers
val repoResolver = createRepoResolvers(ivySettings.getDefaultIvyUserDir)
ivySettings.addResolver(repoResolver)
ivySettings.setDefaultResolver(repoResolver.getName)
processRemoteRepoArg(ivySettings, remoteRepos)
ivySettings
}
/**
* Load Ivy settings from a given filename, using supplied resolvers
* @param settingsFile Path to Ivy settings file
* @param remoteRepos Comma-delimited string of remote repositories other than maven central
* @param ivyPath The path to the local ivy repository
* @return An IvySettings object
*/
def loadIvySettings(
settingsFile: String,
remoteRepos: Option[String],
ivyPath: Option[String]): IvySettings = {
val file = new File(settingsFile)
require(file.exists(), s"Ivy settings file $file does not exist")
require(file.isFile(), s"Ivy settings file $file is not a normal file")
val ivySettings: IvySettings = new IvySettings
try {
ivySettings.load(file)
} catch {
case e @ (_: IOException | _: ParseException) =>
throw new SparkException(s"Failed when loading Ivy settings from $settingsFile", e)
}
processIvyPathArg(ivySettings, ivyPath)
processRemoteRepoArg(ivySettings, remoteRepos)
ivySettings
}
/* Set ivy settings for location of cache, if option is supplied */
private def processIvyPathArg(ivySettings: IvySettings, ivyPath: Option[String]): Unit = {
ivyPath.filterNot(_.trim.isEmpty).foreach { alternateIvyDir =>
ivySettings.setDefaultIvyUserDir(new File(alternateIvyDir))
ivySettings.setDefaultCache(new File(alternateIvyDir, "cache"))
}
}
/* Add any optional additional remote repositories */
private def processRemoteRepoArg(ivySettings: IvySettings, remoteRepos: Option[String]): Unit = {
remoteRepos.filterNot(_.trim.isEmpty).map(_.split(",")).foreach { repositoryList =>
val cr = new ChainResolver
cr.setName("user-list")
// add current default resolver, if any
Option(ivySettings.getDefaultResolver).foreach(cr.add)
// add additional repositories, last resolution in chain takes precedence
repositoryList.zipWithIndex.foreach { case (repo, i) =>
val brr: IBiblioResolver = new IBiblioResolver
brr.setM2compatible(true)
brr.setUsepoms(true)
brr.setRoot(repo)
brr.setName(s"repo-${i + 1}")
cr.add(brr)
// scalastyle:off println
printStream.println(s"$repo added as a remote repository with the name: ${brr.getName}")
// scalastyle:on println
}
ivySettings.addResolver(cr)
ivySettings.setDefaultResolver(cr.getName)
}
}
  /** Builds a default module descriptor (also handy in tests). Values are dummy strings. */
def getModuleDescriptor: DefaultModuleDescriptor = DefaultModuleDescriptor.newDefaultInstance(
    // Include a UUID in the module name, so that multiple clients resolving maven coordinates at
    // the same time do not modify the same resolution file concurrently.
ModuleRevisionId.newInstance("org.apache.spark",
s"spark-submit-parent-${UUID.randomUUID.toString}",
"1.0"))
/**
* Clear ivy resolution from current launch. The resolution file is usually at
* ~/.ivy2/org.apache.spark-spark-submit-parent-$UUID-default.xml,
* ~/.ivy2/resolved-org.apache.spark-spark-submit-parent-$UUID-1.0.xml, and
* ~/.ivy2/resolved-org.apache.spark-spark-submit-parent-$UUID-1.0.properties.
* Since each launch will have its own resolution files created, delete them after
* each resolution to prevent accumulation of these files in the ivy cache dir.
*/
private def clearIvyResolutionFiles(
mdId: ModuleRevisionId,
ivySettings: IvySettings,
ivyConfName: String): Unit = {
val currentResolutionFiles = Seq(
s"${mdId.getOrganisation}-${mdId.getName}-$ivyConfName.xml",
s"resolved-${mdId.getOrganisation}-${mdId.getName}-${mdId.getRevision}.xml",
s"resolved-${mdId.getOrganisation}-${mdId.getName}-${mdId.getRevision}.properties"
)
currentResolutionFiles.foreach { filename =>
new File(ivySettings.getDefaultCache, filename).delete()
}
}
/**
* Resolves any dependencies that were supplied through maven coordinates
* @param coordinates Comma-delimited string of maven coordinates
* @param ivySettings An IvySettings containing resolvers to use
* @param exclusions Exclusions to apply when resolving transitive dependencies
* @return The comma-delimited path to the jars of the given maven artifacts including their
* transitive dependencies
*/
def resolveMavenCoordinates(
coordinates: String,
ivySettings: IvySettings,
exclusions: Seq[String] = Nil,
isTest: Boolean = false): String = {
if (coordinates == null || coordinates.trim.isEmpty) {
""
} else {
val sysOut = System.out
// Default configuration name for ivy
val ivyConfName = "default"
// A Module descriptor must be specified. Entries are dummy strings
val md = getModuleDescriptor
md.setDefaultConf(ivyConfName)
try {
// To prevent ivy from logging to system out
System.setOut(printStream)
val artifacts = extractMavenCoordinates(coordinates)
// Directories for caching downloads through ivy and storing the jars when maven coordinates
// are supplied to spark-submit
val packagesDirectory: File = new File(ivySettings.getDefaultIvyUserDir, "jars")
// scalastyle:off println
printStream.println(
s"Ivy Default Cache set to: ${ivySettings.getDefaultCache.getAbsolutePath}")
printStream.println(s"The jars for the packages stored in: $packagesDirectory")
// scalastyle:on println
val ivy = Ivy.newInstance(ivySettings)
// Set resolve options to download transitive dependencies as well
val resolveOptions = new ResolveOptions
resolveOptions.setTransitive(true)
val retrieveOptions = new RetrieveOptions
// Turn downloading and logging off for testing
if (isTest) {
resolveOptions.setDownload(false)
resolveOptions.setLog(LogOptions.LOG_QUIET)
retrieveOptions.setLog(LogOptions.LOG_QUIET)
} else {
resolveOptions.setDownload(true)
}
// Add exclusion rules for Spark and Scala Library
addExclusionRules(ivySettings, ivyConfName, md)
// add all supplied maven artifacts as dependencies
addDependenciesToIvy(md, artifacts, ivyConfName)
exclusions.foreach { e =>
md.addExcludeRule(createExclusion(e + ":*", ivySettings, ivyConfName))
}
// resolve dependencies
val rr: ResolveReport = ivy.resolve(md, resolveOptions)
if (rr.hasError) {
throw new RuntimeException(rr.getAllProblemMessages.toString)
}
// retrieve all resolved dependencies
ivy.retrieve(rr.getModuleDescriptor.getModuleRevisionId,
packagesDirectory.getAbsolutePath + File.separator +
"[organization]_[artifact]-[revision](-[classifier]).[ext]",
retrieveOptions.setConfs(Array(ivyConfName)))
resolveDependencyPaths(rr.getArtifacts.toArray, packagesDirectory)
} finally {
System.setOut(sysOut)
clearIvyResolutionFiles(md.getModuleRevisionId, ivySettings, ivyConfName)
}
}
}
private[deploy] def createExclusion(
coords: String,
ivySettings: IvySettings,
ivyConfName: String): ExcludeRule = {
val c = extractMavenCoordinates(coords)(0)
val id = new ArtifactId(new ModuleId(c.groupId, c.artifactId), "*", "*", "*")
val rule = new DefaultExcludeRule(id, ivySettings.getMatcher("glob"), null)
rule.addConfiguration(ivyConfName)
rule
}
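  /**
   * Parses a single "key=value" Spark config property into a (key, value) pair,
   * e.g. "spark.executor.memory=2g" -> ("spark.executor.memory", "2g") (illustrative).
   * Throws a SparkException if no '=' is present.
   */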
def parseSparkConfProperty(pair: String): (String, String) = {
pair.split("=", 2).toSeq match {
case Seq(k, v) => (k, v)
case _ => throw new SparkException(s"Spark config without '=': $pair")
}
}
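  /**
   * Looks up, via ServiceLoader, the single external SparkSubmitOperation implementation that
   * supports the given master URL; fails if none or more than one is registered.
   */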
private[deploy] def getSubmitOperations(master: String): SparkSubmitOperation = {
val loader = Utils.getContextOrSparkClassLoader
val serviceLoaders =
ServiceLoader.load(classOf[SparkSubmitOperation], loader)
.asScala
.filter(_.supports(master))
serviceLoaders.size match {
case x if x > 1 =>
throw new SparkException(s"Multiple($x) external SparkSubmitOperations " +
s"clients registered for master url ${master}.")
case 1 => serviceLoaders.headOption.get
case _ =>
throw new IllegalArgumentException(s"No external SparkSubmitOperations " +
s"clients found for master url: '$master'")
}
}
}
/**
* Provides an indirection layer for passing arguments as system properties or flags to
* the user's driver program or to downstream launcher tools.
*/
private case class OptionAssigner(
value: String,
clusterManager: Int,
deployMode: Int,
clOption: String = null,
confKey: String = null,
mergeFn: Option[(String, String) => String] = None)
private[spark] trait SparkSubmitOperation {
def kill(submissionId: String, conf: SparkConf): Unit
def printSubmissionStatus(submissionId: String, conf: SparkConf): Unit
def supports(master: String): Boolean
}
| rednaxelafx/apache-spark | core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala | Scala | apache-2.0 | 60,550 |
/*
Copyright (c) 2017, Qvantel
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Qvantel nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL Qvantel BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.qvantel.jsonapi
import com.qvantel.jsonapi.akka.JsonApiSupport._
import org.specs2.mutable._
import _root_.spray.json._
import _root_.spray.json.DefaultJsonProtocol._
import _root_.akka.http.scaladsl.testkit.Specs2RouteTest
import _root_.akka.http.scaladsl.model._
import _root_.akka.http.scaladsl.server.Directives._
class RelatedResponseAkkaSpec extends Specification with Specs2RouteTest {
def actorRefFactory = system
implicit val apiRoot: com.qvantel.jsonapi.ApiRoot = ApiRoot(None)
@jsonApiResource final case class Test(id: String, name: String)
val test: Option[Test] = Some(Test("teståöä•Ωé®", "name")) // test UTF-8
val emptyTest: Option[Test] = None
val tests: List[Test] = List(Test("test 1", "name 1"), Test("test 2", "name 2"))
val emptyTests: List[Test] = List.empty
val route = get {
complete {
RelatedResponse(test)
}
}
"correctly write to one none case" in {
RelatedResponse(emptyTest).toResponse must be equalTo JsObject(
"data" -> JsNull
)
}
"correctly write to one some case" in {
val answer = rawOne(test.get)
RelatedResponse(test).toResponse must be equalTo answer
RelatedResponse(test.get).toResponse must be equalTo answer
}
"correctly write to one some case with sparse fields defined" in {
implicit val sparseFields: Map[String, List[String]] = Map("tests" -> List("someFieldThatDoesNotExist"))
val answer = rawOne(test.get)
RelatedResponse(test).toResponse must be equalTo answer
RelatedResponse(test.get).toResponse must be equalTo answer
}
"correctly write to many empty case" in {
RelatedResponse(emptyTests).toResponse must be equalTo JsObject(
"data" -> JsArray.empty
)
}
"correctly write to many non-empty case" in {
val answer = rawCollection(tests)
RelatedResponse(tests).toResponse must be equalTo answer
RelatedResponse(tests.toSeq).toResponse must be equalTo answer
RelatedResponse(tests.toIterable).toResponse must be equalTo answer
RelatedResponse(tests.toSet).toResponse must be equalTo answer
}
"correctly write to many non-empty case with sparse fields defined" in {
implicit val sparseFields: Map[String, List[String]] = Map("tests" -> List("someFieldThatDoesNotExist"))
val answer = rawCollection(tests)
RelatedResponse(tests).toResponse must be equalTo answer
RelatedResponse(tests.toSeq).toResponse must be equalTo answer
RelatedResponse(tests.toIterable).toResponse must be equalTo answer
RelatedResponse(tests.toSet).toResponse must be equalTo answer
}
"make sure that correct content type is given" in {
Get("/") ~> route ~> check {
contentType must be equalTo ContentType(MediaTypes.`application/vnd.api+json`)
}
}
}
| qvantel/jsonapi-scala | akka/src/test/scala/com/qvantel/jsonapi/RelatedResponseAkkaSpec.scala | Scala | bsd-3-clause | 4,338 |
package org.jetbrains.plugins.scala
package codeInsight
package template
package impl
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
/**
* @author Alefas
* @since 18/12/14.
*/
final class ScalaCodeContextType extends ScalaFileTemplateContextType.ElementContextType("CODE", ScalaCodeInsightBundle.message("element.context.type.code")) {
override protected def isInContext(offset: Int)
(implicit file: ScalaFile): Boolean =
!(ScalaCommentContextType.isInContext(offset) ||
ScalaStringContextType.isInContext(offset))
} | JetBrains/intellij-scala | scala/codeInsight/src/org/jetbrains/plugins/scala/codeInsight/template/impl/ScalaCodeContextType.scala | Scala | apache-2.0 | 584 |
package com.wavesplatform.transaction.serialization.impl
import java.nio.ByteBuffer
import com.google.common.primitives.{Bytes, Longs}
import com.wavesplatform.serialization.{ByteBufferOps, Deser}
import com.wavesplatform.transaction.assets.IssueTransaction
import com.wavesplatform.transaction.{Proofs, TxVersion}
import play.api.libs.json.{JsObject, Json}
import scala.util.Try
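/**
 * Serializes IssueTransaction instances to JSON and binary form, and parses the V1 and V2
 * binary formats back into transactions.
 */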
object IssueTxSerializer {
def toJson(tx: IssueTransaction): JsObject = {
import tx._
BaseTxJson.toJson(tx) ++ Json.obj(
"assetId" -> id().toString,
"name" -> name.toStringUtf8,
"quantity" -> quantity,
"reissuable" -> reissuable,
"decimals" -> decimals,
"description" -> description.toStringUtf8
) ++ (if (version >= TxVersion.V2) Json.obj("script" -> script.map(_.bytes().base64)) else JsObject.empty) ++
(if (version == TxVersion.V2) Json.obj("chainId" -> chainId) else JsObject.empty)
}
def bodyBytes(tx: IssueTransaction): Array[Byte] = {
import tx._
lazy val baseBytes = Bytes.concat(
sender.arr,
Deser.serializeArrayWithLength(name.toByteArray),
Deser.serializeArrayWithLength(description.toByteArray),
Longs.toByteArray(quantity),
Array(decimals),
Deser.serializeBoolean(reissuable),
Longs.toByteArray(fee),
Longs.toByteArray(timestamp)
)
version match {
case TxVersion.V1 => Bytes.concat(Array(typeId), baseBytes)
case TxVersion.V2 =>
Bytes.concat(Array(builder.typeId, version, chainId), baseBytes, Deser.serializeOptionOfArrayWithLength(script)(_.bytes().arr))
case _ => PBTransactionSerializer.bodyBytes(tx)
}
}
def toBytes(tx: IssueTransaction): Array[Byte] =
tx.version match {
case TxVersion.V1 => Bytes.concat(Array(tx.typeId), tx.proofs.toSignature.arr, this.bodyBytes(tx)) // Signature before body, typeId appears twice
case TxVersion.V2 => Bytes.concat(Array(0: Byte), this.bodyBytes(tx), tx.proofs.bytes())
case _ => PBTransactionSerializer.bytes(tx)
}
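  // Supports two binary layouts: the legacy V1 format (typeId, signature, then the body with the
  // typeId repeated) and the V2 format (a leading 0 byte, the body, then the proofs).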
def parseBytes(bytes: Array[Byte]): Try[IssueTransaction] = Try {
def parseCommonPart(version: TxVersion, buf: ByteBuffer): IssueTransaction = {
val sender = buf.getPublicKey
val name = Deser.parseArrayWithLength(buf)
val description = Deser.parseArrayWithLength(buf)
val quantity = buf.getLong
val decimals = buf.getByte
val reissuable = buf.getBoolean
val fee = buf.getLong
val timestamp = buf.getLong
IssueTransaction(
version,
sender,
name,
description,
quantity,
decimals,
reissuable,
None,
fee,
timestamp,
)
}
require(bytes.length > 2, "buffer underflow while parsing transaction")
if (bytes(0) == 0) {
require(bytes(1) == IssueTransaction.typeId, "transaction type mismatch")
val buf = ByteBuffer.wrap(bytes, 4, bytes.length - 4)
parseCommonPart(TxVersion.V2, buf).copy(script = buf.getScript, proofs = buf.getProofs)
} else {
require(bytes(0) == IssueTransaction.typeId, "transaction type mismatch")
val buf = ByteBuffer.wrap(bytes, 1, bytes.length - 1)
val signature = buf.getSignature
require(buf.getByte == IssueTransaction.typeId, "transaction type mismatch")
parseCommonPart(TxVersion.V1, buf).copy(proofs = Proofs(signature))
}
}
}
| wavesplatform/Waves | node/src/main/scala/com/wavesplatform/transaction/serialization/impl/IssueTxSerializer.scala | Scala | mit | 3,474 |
package com.github.tminglei.slickpg
package array
import scala.reflect.ClassTag
import slick.ast.FieldSymbol
import slick.driver.{PostgresDriver, JdbcTypesComponent}
import slick.profile.RelationalProfile.ColumnOption.Length
import java.sql.{ResultSet, PreparedStatement}
trait PgArrayJdbcTypes extends JdbcTypesComponent { driver: PostgresDriver =>
@deprecated(message = "use 'new SimpleArrayJdbcType[T](..).to[SEQ[T]](..)' instead", since = "0.7.1")
class SimpleArrayListJdbcType[T](sqlBaseType: String)(
implicit override val classTag: ClassTag[List[T]], tag: ClassTag[T])
extends WrappedConvArrayJdbcType[T, List](
new SimpleArrayJdbcType(sqlBaseType), _.toList) {
def basedOn[U](tmap: T => U, tcomap: U => T): DriverJdbcType[List[T]] =
delegate.asInstanceOf[SimpleArrayJdbcType[T]].basedOn(tmap, tcomap).to(_.toList)
}
//
class SimpleArrayJdbcType[T](sqlBaseType: String)(
implicit override val classTag: ClassTag[Seq[T]], tag: ClassTag[T])
extends DriverJdbcType[Seq[T]] {
override def sqlType: Int = java.sql.Types.ARRAY
override def sqlTypeName(size: Option[FieldSymbol]): String = s"$sqlBaseType ARRAY"
override def getValue(r: ResultSet, idx: Int): Seq[T] = {
val value = r.getArray(idx)
if (r.wasNull) null else value.getArray.asInstanceOf[Array[Any]].map(_.asInstanceOf[T])
}
override def setValue(vList: Seq[T], p: PreparedStatement, idx: Int): Unit = p.setArray(idx, mkArray(vList))
override def updateValue(vList: Seq[T], r: ResultSet, idx: Int): Unit = r.updateArray(idx, mkArray(vList))
override def hasLiteralForm: Boolean = false
override def valueToSQLLiteral(vList: Seq[T]) = if(vList eq null) "NULL" else s"'${buildArrayStr(vList)}'"
//--
private def mkArray(v: Seq[T]): java.sql.Array = utils.SimpleArrayUtils.mkArray(buildArrayStr)(sqlBaseType, v)
protected def buildArrayStr(vList: Seq[Any]): String = utils.SimpleArrayUtils.mkString[Any](_.toString)(vList)
///
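    // Derives an array type for element type T that is stored as U in the database (via tmap)
    // and read back through tcomap.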
def basedOn[U](tmap: T => U, tcomap: U => T): SimpleArrayJdbcType[T] =
new SimpleArrayJdbcType[T](sqlBaseType) {
override def getValue(r: ResultSet, idx: Int): Seq[T] = {
val value = r.getArray(idx)
if (r.wasNull) null else value.getArray.asInstanceOf[Array[Any]]
.map(e => tcomap(e.asInstanceOf[U]))
}
//--
override protected def buildArrayStr(v: Seq[Any]): String = super.buildArrayStr(v.map(e => tmap(e.asInstanceOf[T])))
}
def to[SEQ[T] <: Seq[T]](conv: Seq[T] => SEQ[T])(implicit classTag: ClassTag[SEQ[T]]): DriverJdbcType[SEQ[T]] =
new WrappedConvArrayJdbcType[T, SEQ](this, conv)
}
/* alias, added for back compatible */
@deprecated(message = "use AdvancedArrayListJdbcType instead", since = "0.6.5")
type NestedArrayListJdbcType[T] = AdvancedArrayListJdbcType[T]
///-- can be used to map complex composite/nested array
@deprecated(message = "use 'new AdvancedArrayJdbcType[T](..).to[SEQ[T]](..)' instead", since = "0.7.1")
class AdvancedArrayListJdbcType[T](sqlBaseType: String,
fromString: (String => List[T]),
mkString: (List[T] => String))(
implicit override val classTag: ClassTag[List[T]], tag: ClassTag[T])
extends WrappedConvArrayJdbcType[T, List](
new AdvancedArrayJdbcType(sqlBaseType, fromString, v => mkString(v.toList)), _.toList)
//
class AdvancedArrayJdbcType[T](sqlBaseType: String,
fromString: (String => Seq[T]),
mkString: (Seq[T] => String))(
implicit override val classTag: ClassTag[Seq[T]], tag: ClassTag[T])
extends DriverJdbcType[Seq[T]] {
override def sqlType: Int = java.sql.Types.ARRAY
override def sqlTypeName(size: Option[FieldSymbol]): String = s"$sqlBaseType ARRAY"
override def getValue(r: ResultSet, idx: Int): Seq[T] = {
val value = r.getString(idx)
if (r.wasNull) null else fromString(value)
}
override def setValue(vList: Seq[T], p: PreparedStatement, idx: Int): Unit = p.setArray(idx, mkArray(vList))
override def updateValue(vList: Seq[T], r: ResultSet, idx: Int): Unit = r.updateArray(idx, mkArray(vList))
override def hasLiteralForm: Boolean = false
override def valueToSQLLiteral(vList: Seq[T]) = if(vList eq null) "NULL" else s"'${mkString(vList)}'"
//--
private def mkArray(v: Seq[T]): java.sql.Array = utils.SimpleArrayUtils.mkArray(mkString)(sqlBaseType, v)
def to[SEQ[T] <: Seq[T]](conv: Seq[T] => SEQ[T])(implicit classTag: ClassTag[SEQ[T]]): DriverJdbcType[SEQ[T]] =
new WrappedConvArrayJdbcType[T, SEQ](this, conv)
}
/////////////////////////////////////////////////////////////////////////////////////////////
private[array] class WrappedConvArrayJdbcType[T, SEQ[T] <: Seq[T]](val delegate: DriverJdbcType[Seq[T]], val conv: Seq[T] => SEQ[T])(
implicit override val classTag: ClassTag[SEQ[T]], tag: ClassTag[T]) extends DriverJdbcType[SEQ[T]] {
override def sqlType: Int = delegate.sqlType
override def sqlTypeName(size: Option[FieldSymbol]): String = delegate.sqlTypeName(size)
override def getValue(r: ResultSet, idx: Int): SEQ[T] = Option(delegate.getValue(r, idx)).map(conv).getOrElse(null.asInstanceOf[SEQ[T]])
override def setValue(vList: SEQ[T], p: PreparedStatement, idx: Int): Unit = delegate.setValue(vList, p, idx)
override def updateValue(vList: SEQ[T], r: ResultSet, idx: Int): Unit = delegate.updateValue(vList, r, idx)
override def hasLiteralForm: Boolean = delegate.hasLiteralForm
override def valueToSQLLiteral(vList: SEQ[T]) = delegate.valueToSQLLiteral(Option(vList).orNull)
}
}
| frosforever/slick-pg | core/src/main/scala/com/github/tminglei/slickpg/array/PgArrayJdbcTypes.scala | Scala | bsd-2-clause | 5,888 |
package chandu0101.scalajs.react.components.demo.components.materialui.svgicons
import chandu0101.scalajs.react.components.materialui.MuiSvgIcon
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.all.svg._
import japgolly.scalajs.react.vdom.prefix_<^._
object ToggleStarBorder {
val component = ReactComponentB[Unit]("ToggleStarBorder")
.render(P => {
MuiSvgIcon()(
path(^.key := "acg", d := "M22 9.24l-7.19-.62L12 2 9.19 8.63 2 9.24l5.46 4.73L5.82 21 12 17.27 18.18 21l-1.63-7.03L22 9.24zM12 15.4l-3.76 2.27 1-4.28-3.32-2.88 4.38-.38L12 6.1l1.71 4.04 4.38.38-3.32 2.88 1 4.28L12 15.4z")
)
}).buildU
def apply() = component()
}
| coreyauger/scalajs-react-components | demo/src/main/scala/chandu0101/scalajs/react/components/demo/components/materialui/svgicons/ToggleStarBorder.scala | Scala | apache-2.0 | 681 |
package com.github.j5ik2o.forseti.domain.client
import com.github.j5ik2o.forseti.domain.support.EntityId
case class ClientSessionId(value: Long) extends EntityId
| j5ik2o/forseti | domain/src/main/scala/com/github/j5ik2o/forseti/domain/client/ClientSessionId.scala | Scala | mit | 164 |
package metal
package immutable
import scala.reflect.ClassTag
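/**
 * Factory for immutable Metal maps; each method builds through the corresponding mutable
 * factory and freezes the result with `result()`.
 */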
trait MapFactory extends generic.MapFactory {
type M[K, V] <: immutable.Map[K, V]
type MM[K, V] <: mutable.Map[K, V] { type Immutable <: M[K, V] }
def mutableFactory: mutable.MapFactory { type M[K, V] = MM[K, V]; type KExtra[K] = MapFactory.this.KExtra[K]; type VExtra[V] = MapFactory.this.VExtra[V] }
def empty[K:ClassTag:KExtra, V:ClassTag:VExtra]: M[K, V] = mutableFactory.empty[K, V].result()
def apply[K:ClassTag:KExtra, V:ClassTag:VExtra](kvPairs: (K, V)*): M[K, V] = mutableFactory.apply(kvPairs: _*).result()
def fromMap[K:ClassTag:KExtra, V:ClassTag:VExtra](map: scala.collection.Map[K, V]): M[K, V] = mutableFactory.fromMap(map).result()
def fromArrays[K:ClassTag:KExtra, V:ClassTag:VExtra](keysArray: Array[K], valuesArray: Array[V]): M[K, V] = mutableFactory.fromArrays(keysArray, valuesArray).result()
def fromIterable[K:ClassTag:KExtra, V:ClassTag:VExtra](keyValuePairs: Iterable[(K, V)]): M[K, V] = mutableFactory.fromIterable(keyValuePairs).result()
}
| denisrosset/ptrcoll | library/src/main/scala/metal/immutable/MapFactory.scala | Scala | mit | 1,056 |
package org.sisioh.aws4s.sqs.model
import com.amazonaws.services.sqs.model._
import org.sisioh.aws4s.PimpedType
import scala.collection.JavaConverters._
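/** Convenience constructors for `ChangeMessageVisibilityBatchRequest`. */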
object ChangeMessageVisibilityBatchRequestFactory {
def create(): ChangeMessageVisibilityBatchRequest = new ChangeMessageVisibilityBatchRequest()
def create(queueUrl: String, entries: Seq[ChangeMessageVisibilityBatchRequestEntry]): ChangeMessageVisibilityBatchRequest =
new ChangeMessageVisibilityBatchRequest(queueUrl, entries.asJava)
}
class RichChangeMessageVisibilityBatchRequest(val underlying: ChangeMessageVisibilityBatchRequest)
extends AnyVal with PimpedType[ChangeMessageVisibilityBatchRequest] {
def queueUrlOpt: Option[String] = Option(underlying.getQueueUrl)
def queueUrlOpt_=(value: Option[String]): Unit =
underlying.setQueueUrl(value.orNull)
def withQueueUrlOpt(value: Option[String]): ChangeMessageVisibilityBatchRequest =
underlying.withQueueUrl(value.orNull)
// ---
def entries: Seq[ChangeMessageVisibilityBatchRequestEntry] = underlying.getEntries.asScala.toVector
def entries_=(value: Seq[ChangeMessageVisibilityBatchRequestEntry]): Unit =
underlying.setEntries(value.asJava)
def withEntries(value: Seq[ChangeMessageVisibilityBatchRequestEntry]): ChangeMessageVisibilityBatchRequest =
underlying.withEntries(value.asJava)
}
| everpeace/aws4s | aws4s-sqs/src/main/scala/org/sisioh/aws4s/sqs/model/RichChangeMessageVisibilityBatchRequest.scala | Scala | mit | 1,355 |
/**
* Copyright 2017 ZuInnoTe (Jörn Franke) <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
/**
*
 * This test integrates HDFS and Spark
*
*/
package org.zuinnote.spark.bitcoin.block
import java.io.{File, IOException}
import java.nio.file.Files
import org.apache.commons.io.FileUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hdfs.MiniDFSCluster
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
import org.scalatest.{BeforeAndAfterAll, GivenWhenThen}
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import org.zuinnote.spark.bitcoin.model.{BitcoinBlock, BitcoinBlockWithAuxPOW, EnrichedBitcoinBlock, EnrichedBitcoinBlockWithAuxPOW}
import java.math.BigDecimal
class SparkBitcoinBlockDSSparkMasterIntegrationSpec extends AnyFlatSpec with BeforeAndAfterAll with GivenWhenThen with Matchers {
private val master: String = "local[2]"
// private val appName: String = "spark-hadoocryptoledger-ds-integrationtest"
private val tmpPrefix: String = "hcl-integrationtest"
private lazy val tmpPath: java.nio.file.Path = Files.createTempDirectory(tmpPrefix)
// private val CLUSTERNAME: String = "hcl-minicluster"
private val DFS_INPUT_DIR_NAME: String = "/input"
// private val DFS_OUTPUT_DIR_NAME: String = "/output"
// private val DEFAULT_OUTPUT_FILENAME: String = "part-00000"
private val DFS_INPUT_DIR: Path = new Path(DFS_INPUT_DIR_NAME)
// private val DFS_OUTPUT_DIR: Path = new Path(DFS_OUTPUT_DIR_NAME)
private val NOOFDATANODES: Int = 4
private lazy val dfsCluster: MiniDFSCluster = {
// create DFS mini cluster
val conf = new Configuration()
val baseDir = new File(tmpPath.toString).getAbsoluteFile
conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath)
val builder = new MiniDFSCluster.Builder(conf)
val cluster = builder.numDataNodes(NOOFDATANODES).build()
conf.set("fs.defaultFS", cluster.getFileSystem().getUri.toString)
cluster
}
private lazy val spark: SparkSession = {
SparkSession.builder().master(master).appName(this.getClass.getSimpleName).getOrCreate()
}
override def beforeAll(): Unit = {
super.beforeAll()
    // create a shutdown hook to remove temp files (= HDFS MiniCluster) after shutdown; may need to
    // rethink this to avoid creating too many threads
Runtime.getRuntime.addShutdownHook(new Thread("remove temporary directory") {
override def run(): Unit = {
try {
FileUtils.deleteDirectory(tmpPath.toFile)
} catch {
          case e: IOException => throw new RuntimeException("Temporary files in the following path could not be deleted: " + tmpPath, e)
}
}
})
}
override def afterAll(): Unit = {
// close dfs cluster
dfsCluster.shutdown()
super.afterAll()
}
"The genesis block on DFS" should "be fully read in dataframe" in {
Given("Genesis Block on DFSCluster")
// create input directory
dfsCluster.getFileSystem().delete(DFS_INPUT_DIR, true)
dfsCluster.getFileSystem().mkdirs(DFS_INPUT_DIR)
// copy bitcoin blocks
val classLoader = getClass.getClassLoader
// put testdata on DFS
val fileName: String = "genesis.blk"
val fileNameFullLocal = classLoader.getResource("testdata/" + fileName).getFile
val inputFile = new Path(fileNameFullLocal)
dfsCluster.getFileSystem().copyFromLocalFile(false, false, inputFile, DFS_INPUT_DIR)
When("reading Genesis block using datasource")
val df = spark.read.format("org.zuinnote.spark.bitcoin.block").option("magic", "F9BEB4D9").load(dfsCluster.getFileSystem().getUri.toString + DFS_INPUT_DIR_NAME)
Then("all fields should be readable trough Spark SQL")
// check first if structure is correct
assert("blockSize" == df.columns(0))
assert("magicNo" == df.columns(1))
assert("version" == df.columns(2))
assert("time" == df.columns(3))
assert("bits" == df.columns(4))
assert("nonce" == df.columns(5))
assert("transactionCounter" == df.columns(6))
assert("hashPrevBlock" == df.columns(7))
assert("hashMerkleRoot" == df.columns(8))
assert("transactions" == df.columns(9))
// validate block data
val blockSize = df.select("blockSize").collect
assert(285 == blockSize(0).getLong(0))
val magicNo = df.select("magicNo").collect
val magicNoExpected: Array[Byte] = Array(0xF9.toByte, 0xBE.toByte, 0xB4.toByte, 0xD9.toByte)
assert(magicNoExpected.deep == magicNo(0).get(0).asInstanceOf[Array[Byte]].deep)
val version = df.select("version").collect
assert(1 == version(0).getLong(0))
val time = df.select("time").collect
assert(1231006505 == time(0).getLong(0))
val bits = df.select("bits").collect
val bitsExpected: Array[Byte] = Array(0xFF.toByte, 0xFF.toByte, 0x00.toByte, 0x1D.toByte)
assert(bitsExpected.deep == bits(0).get(0).asInstanceOf[Array[Byte]].deep)
val nonce = df.select("nonce").collect
assert(2083236893 == nonce(0).getLong(0))
val transactionCounter = df.select("transactionCounter").collect
assert(1 == transactionCounter(0).getLong(0))
val hashPrevBlock = df.select("hashPrevBlock").collect
val hashPrevBlockExpected: Array[Byte] = Array(0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte)
assert(hashPrevBlockExpected.deep == hashPrevBlock(0).get(0).asInstanceOf[Array[Byte]].deep)
val hashMerkleRoot = df.select("hashMerkleRoot").collect
val hashMerkleRootExpected: Array[Byte] = Array(0x3B.toByte, 0xA3.toByte, 0xED.toByte, 0xFD.toByte, 0x7A.toByte, 0x7B.toByte, 0x12.toByte, 0xB2.toByte, 0x7A.toByte, 0xC7.toByte, 0x2C.toByte, 0x3E.toByte, 0x67.toByte, 0x76.toByte, 0x8F.toByte, 0x61.toByte, 0x7F.toByte,
0xC8.toByte, 0x1B.toByte, 0xC3.toByte, 0x88.toByte, 0x8A.toByte, 0x51.toByte, 0x32.toByte, 0x3A.toByte, 0x9F.toByte, 0xB8.toByte, 0xAA.toByte, 0x4B.toByte, 0x1E.toByte, 0x5E.toByte, 0x4A.toByte)
assert(hashMerkleRootExpected.deep == hashMerkleRoot(0).get(0).asInstanceOf[Array[Byte]].deep)
// validate transactions
val transactionsDF = df.select(explode(df("transactions")).alias("transactions"))
// one transaction
val transactionsDFCount = transactionsDF.count
assert(1 == transactionsDFCount)
val transactionsVersion = transactionsDF.select("transactions.version").collect
assert(1 == transactionsVersion(0).getLong(0))
val inCounter = transactionsDF.select("transactions.inCounter").collect
val inCounterExpected: Array[Byte] = Array(0x01.toByte)
assert(inCounterExpected.deep == inCounter(0).get(0).asInstanceOf[Array[Byte]].deep)
val outCounter = transactionsDF.select("transactions.outCounter").collect
val outCounterExpected: Array[Byte] = Array(0x01.toByte)
assert(outCounterExpected.deep == outCounter(0).get(0).asInstanceOf[Array[Byte]].deep)
val transactionsLockTime = transactionsDF.select("transactions.lockTime").collect
assert(0 == transactionsLockTime(0).getLong(0))
val transactionsLOIDF = transactionsDF.select(explode(transactionsDF("transactions.listOfInputs")).alias("listOfInputs"))
val prevTransactionHash = transactionsLOIDF.select("listOfInputs.prevTransactionHash").collect
val prevTransactionHashExpected: Array[Byte] = Array(0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte)
assert(prevTransactionHashExpected.deep == prevTransactionHash(0).get(0).asInstanceOf[Array[Byte]].deep)
val previousTxOutIndex = transactionsLOIDF.select("listOfInputs.previousTxOutIndex").collect
assert(4294967295L == previousTxOutIndex(0).getLong(0))
val txInScriptLength = transactionsLOIDF.select("listOfInputs.txInScriptLength").collect
val txInScriptLengthExpected: Array[Byte] = Array(0x4D.toByte)
assert(txInScriptLengthExpected.deep == txInScriptLength(0).get(0).asInstanceOf[Array[Byte]].deep)
val txInScript = transactionsLOIDF.select("listOfInputs.txInScript").collect
val txInScriptExpected: Array[Byte] = Array(0x04.toByte, 0xFF.toByte, 0xFF.toByte, 0x00.toByte, 0x1D.toByte, 0x01.toByte, 0x04.toByte, 0x45.toByte, 0x54.toByte, 0x68.toByte, 0x65.toByte, 0x20.toByte, 0x54.toByte, 0x69.toByte, 0x6D.toByte, 0x65.toByte,
0x73.toByte, 0x20.toByte, 0x30.toByte, 0x33.toByte, 0x2F.toByte, 0x4A.toByte, 0x61.toByte, 0x6E.toByte, 0x2F.toByte, 0x32.toByte, 0x30.toByte, 0x30.toByte, 0x39.toByte, 0x20.toByte, 0x43.toByte, 0x68.toByte,
0x61.toByte, 0x6E.toByte, 0x63.toByte, 0x65.toByte, 0x6C.toByte, 0x6C.toByte, 0x6F.toByte, 0x72.toByte, 0x20.toByte, 0x6F.toByte, 0x6E.toByte, 0x20.toByte, 0x62.toByte, 0x72.toByte, 0x69.toByte, 0x6E.toByte, 0x6B.toByte,
0x20.toByte, 0x6F.toByte, 0x66.toByte, 0x20.toByte, 0x73.toByte, 0x65.toByte, 0x63.toByte, 0x6F.toByte, 0x6E.toByte, 0x64.toByte, 0x20.toByte, 0x62.toByte, 0x61.toByte, 0x69.toByte, 0x6C.toByte, 0x6F.toByte,
0x75.toByte, 0x74.toByte, 0x20.toByte, 0x66.toByte, 0x6F.toByte, 0x72.toByte, 0x20.toByte, 0x62.toByte, 0x61.toByte, 0x6E.toByte, 0x6B.toByte, 0x73.toByte)
assert(txInScriptExpected.deep == txInScript(0).get(0).asInstanceOf[Array[Byte]].deep)
val seqNo = transactionsLOIDF.select("listOfInputs.seqNo").collect
assert(4294967295L == seqNo(0).getLong(0))
val transactionsLOODF = transactionsDF.select(explode(transactionsDF("transactions.listOfOutputs")).alias("listOfOutputs"))
val value = transactionsLOODF.select("listOfOutputs.value").collect
assert(BigDecimal.valueOf(5000000000L).compareTo(value(0).getDecimal(0))==0)
val txOutScriptLength = transactionsLOODF.select("listOfOutputs.txOutScriptLength").collect
val txOutScriptLengthExpected: Array[Byte] = Array(0x43.toByte)
assert(txOutScriptLengthExpected.deep == txOutScriptLength(0).get(0).asInstanceOf[Array[Byte]].deep)
val txOutScript = transactionsLOODF.select("listOfOutputs.txOutScript").collect
val txOutScriptExpected: Array[Byte] = Array(0x41.toByte, 0x04.toByte, 0x67.toByte, 0x8A.toByte, 0xFD.toByte, 0xB0.toByte, 0xFE.toByte, 0x55.toByte, 0x48.toByte, 0x27.toByte, 0x19.toByte, 0x67.toByte, 0xF1.toByte, 0xA6.toByte, 0x71.toByte, 0x30.toByte,
0xB7.toByte, 0x10.toByte, 0x5C.toByte, 0xD6.toByte, 0xA8.toByte, 0x28.toByte, 0xE0.toByte, 0x39.toByte, 0x09.toByte, 0xA6.toByte, 0x79.toByte, 0x62.toByte, 0xE0.toByte, 0xEA.toByte, 0x1F.toByte, 0x61.toByte,
0xDE.toByte, 0xB6.toByte, 0x49.toByte, 0xF6.toByte, 0xBC.toByte, 0x3F.toByte, 0x4C.toByte, 0xEF.toByte, 0x38.toByte, 0xC4.toByte, 0xF3.toByte, 0x55.toByte, 0x04.toByte, 0xE5.toByte, 0x1E.toByte, 0xC1.toByte,
0x12.toByte, 0xDE.toByte, 0x5C.toByte, 0x38.toByte, 0x4D.toByte, 0xF7.toByte, 0xBA.toByte, 0x0B.toByte, 0x8D.toByte, 0x57.toByte, 0x8A.toByte, 0x4C.toByte, 0x70.toByte, 0x2B.toByte, 0x6B.toByte, 0xF1.toByte,
0x1D.toByte, 0x5F.toByte, 0xAC.toByte)
assert(txOutScriptExpected.deep == txOutScript(0).get(0).asInstanceOf[Array[Byte]].deep)
}
"The genesis block on DFS" should "be fully read in dataframe with rich datatypes" in {
Given("Genesis Block on DFSCluster")
// create input directory
dfsCluster.getFileSystem().delete(DFS_INPUT_DIR, true)
dfsCluster.getFileSystem().mkdirs(DFS_INPUT_DIR)
// copy bitcoin blocks
val classLoader = getClass.getClassLoader
// put testdata on DFS
val fileName: String = "genesis.blk"
val fileNameFullLocal = classLoader.getResource("testdata/" + fileName).getFile
val inputFile = new Path(fileNameFullLocal)
dfsCluster.getFileSystem().copyFromLocalFile(false, false, inputFile, DFS_INPUT_DIR)
When("reading Genesis block using datasource")
val df = spark.read.format("org.zuinnote.spark.bitcoin.block").option("magic", "F9BEB4D9").load(dfsCluster.getFileSystem().getUri.toString + DFS_INPUT_DIR_NAME)
Then("all fields should be readable trough Spark SQL")
// check first if structure is correct
assert("blockSize" == df.columns(0))
assert("magicNo" == df.columns(1))
assert("version" == df.columns(2))
assert("time" == df.columns(3))
assert("bits" == df.columns(4))
assert("nonce" == df.columns(5))
assert("transactionCounter" == df.columns(6))
assert("hashPrevBlock" == df.columns(7))
assert("hashMerkleRoot" == df.columns(8))
assert("transactions" == df.columns(9))
// validate block data
val blockSize = df.select("blockSize").collect
assert(285 == blockSize(0).getLong(0))
val magicNo = df.select("magicNo").collect
val magicNoExpected: Array[Byte] = Array(0xF9.toByte, 0xBE.toByte, 0xB4.toByte, 0xD9.toByte)
assert(magicNoExpected.deep == magicNo(0).get(0).asInstanceOf[Array[Byte]].deep)
val version = df.select("version").collect
assert(1 == version(0).getLong(0))
val time = df.select("time").collect
assert(1231006505 == time(0).getLong(0))
val bits = df.select("bits").collect
val bitsExpected: Array[Byte] = Array(0xFF.toByte, 0xFF.toByte, 0x00.toByte, 0x1D.toByte)
assert(bitsExpected.deep == bits(0).get(0).asInstanceOf[Array[Byte]].deep)
val nonce = df.select("nonce").collect
assert(2083236893 == nonce(0).getLong(0))
val transactionCounter = df.select("transactionCounter").collect
assert(1 == transactionCounter(0).getLong(0))
val hashPrevBlock = df.select("hashPrevBlock").collect
val hashPrevBlockExpected: Array[Byte] = Array(0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte)
assert(hashPrevBlockExpected.deep == hashPrevBlock(0).get(0).asInstanceOf[Array[Byte]].deep)
val hashMerkleRoot = df.select("hashMerkleRoot").collect
val hashMerkleRootExpected: Array[Byte] = Array(0x3B.toByte, 0xA3.toByte, 0xED.toByte, 0xFD.toByte, 0x7A.toByte, 0x7B.toByte, 0x12.toByte, 0xB2.toByte, 0x7A.toByte, 0xC7.toByte, 0x2C.toByte, 0x3E.toByte, 0x67.toByte, 0x76.toByte, 0x8F.toByte, 0x61.toByte, 0x7F.toByte,
0xC8.toByte, 0x1B.toByte, 0xC3.toByte, 0x88.toByte, 0x8A.toByte, 0x51.toByte, 0x32.toByte, 0x3A.toByte, 0x9F.toByte, 0xB8.toByte, 0xAA.toByte, 0x4B.toByte, 0x1E.toByte, 0x5E.toByte, 0x4A.toByte)
assert(hashMerkleRootExpected.deep == hashMerkleRoot(0).get(0).asInstanceOf[Array[Byte]].deep)
// validate transactions
val transactionsDF = df.select(explode(df("transactions")).alias("transactions"))
// one transaction
val transactionsDFCount = transactionsDF.count
assert(1 == transactionsDFCount)
val transactionsVersion = transactionsDF.select("transactions.version").collect
assert(1 == transactionsVersion(0).getLong(0))
val inCounter = transactionsDF.select("transactions.inCounter").collect
val inCounterExpected: Array[Byte] = Array(0x01.toByte)
assert(inCounterExpected.deep == inCounter(0).get(0).asInstanceOf[Array[Byte]].deep)
val outCounter = transactionsDF.select("transactions.outCounter").collect
val outCounterExpected: Array[Byte] = Array(0x01.toByte)
assert(outCounterExpected.deep == outCounter(0).get(0).asInstanceOf[Array[Byte]].deep)
val transactionsLockTime = transactionsDF.select("transactions.lockTime").collect
assert(0 == transactionsLockTime(0).getLong(0))
val transactionsLOIDF = transactionsDF.select(explode(transactionsDF("transactions.listOfInputs")).alias("listOfInputs"))
val prevTransactionHash = transactionsLOIDF.select("listOfInputs.prevTransactionHash").collect
val prevTransactionHashExpected: Array[Byte] = Array(0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte)
assert(prevTransactionHashExpected.deep == prevTransactionHash(0).get(0).asInstanceOf[Array[Byte]].deep)
val previousTxOutIndex = transactionsLOIDF.select("listOfInputs.previousTxOutIndex").collect
assert(4294967295L == previousTxOutIndex(0).getLong(0))
val txInScriptLength = transactionsLOIDF.select("listOfInputs.txInScriptLength").collect
val txInScriptLengthExpected: Array[Byte] = Array(0x4D.toByte)
assert(txInScriptLengthExpected.deep == txInScriptLength(0).get(0).asInstanceOf[Array[Byte]].deep)
val txInScript = transactionsLOIDF.select("listOfInputs.txInScript").collect
val txInScriptExpected: Array[Byte] = Array(0x04.toByte, 0xFF.toByte, 0xFF.toByte, 0x00.toByte, 0x1D.toByte, 0x01.toByte, 0x04.toByte, 0x45.toByte, 0x54.toByte, 0x68.toByte, 0x65.toByte, 0x20.toByte, 0x54.toByte, 0x69.toByte, 0x6D.toByte, 0x65.toByte,
0x73.toByte, 0x20.toByte, 0x30.toByte, 0x33.toByte, 0x2F.toByte, 0x4A.toByte, 0x61.toByte, 0x6E.toByte, 0x2F.toByte, 0x32.toByte, 0x30.toByte, 0x30.toByte, 0x39.toByte, 0x20.toByte, 0x43.toByte, 0x68.toByte,
0x61.toByte, 0x6E.toByte, 0x63.toByte, 0x65.toByte, 0x6C.toByte, 0x6C.toByte, 0x6F.toByte, 0x72.toByte, 0x20.toByte, 0x6F.toByte, 0x6E.toByte, 0x20.toByte, 0x62.toByte, 0x72.toByte, 0x69.toByte, 0x6E.toByte, 0x6B.toByte,
0x20.toByte, 0x6F.toByte, 0x66.toByte, 0x20.toByte, 0x73.toByte, 0x65.toByte, 0x63.toByte, 0x6F.toByte, 0x6E.toByte, 0x64.toByte, 0x20.toByte, 0x62.toByte, 0x61.toByte, 0x69.toByte, 0x6C.toByte, 0x6F.toByte,
0x75.toByte, 0x74.toByte, 0x20.toByte, 0x66.toByte, 0x6F.toByte, 0x72.toByte, 0x20.toByte, 0x62.toByte, 0x61.toByte, 0x6E.toByte, 0x6B.toByte, 0x73.toByte)
assert(txInScriptExpected.deep == txInScript(0).get(0).asInstanceOf[Array[Byte]].deep)
val seqNo = transactionsLOIDF.select("listOfInputs.seqNo").collect
assert(4294967295L == seqNo(0).getLong(0))
val transactionsLOODF = transactionsDF.select(explode(transactionsDF("transactions.listOfOutputs")).alias("listOfOutputs"))
val value = transactionsLOODF.select("listOfOutputs.value").collect
assert(BigDecimal.valueOf(5000000000L).compareTo(value(0).getDecimal(0))==0)
val txOutScriptLength = transactionsLOODF.select("listOfOutputs.txOutScriptLength").collect
val txOutScriptLengthExpected: Array[Byte] = Array(0x43.toByte)
assert(txOutScriptLengthExpected.deep == txOutScriptLength(0).get(0).asInstanceOf[Array[Byte]].deep)
val txOutScript = transactionsLOODF.select("listOfOutputs.txOutScript").collect
val txOutScriptExpected: Array[Byte] = Array(0x41.toByte, 0x04.toByte, 0x67.toByte, 0x8A.toByte, 0xFD.toByte, 0xB0.toByte, 0xFE.toByte, 0x55.toByte, 0x48.toByte, 0x27.toByte, 0x19.toByte, 0x67.toByte, 0xF1.toByte, 0xA6.toByte, 0x71.toByte, 0x30.toByte,
0xB7.toByte, 0x10.toByte, 0x5C.toByte, 0xD6.toByte, 0xA8.toByte, 0x28.toByte, 0xE0.toByte, 0x39.toByte, 0x09.toByte, 0xA6.toByte, 0x79.toByte, 0x62.toByte, 0xE0.toByte, 0xEA.toByte, 0x1F.toByte, 0x61.toByte,
0xDE.toByte, 0xB6.toByte, 0x49.toByte, 0xF6.toByte, 0xBC.toByte, 0x3F.toByte, 0x4C.toByte, 0xEF.toByte, 0x38.toByte, 0xC4.toByte, 0xF3.toByte, 0x55.toByte, 0x04.toByte, 0xE5.toByte, 0x1E.toByte, 0xC1.toByte,
0x12.toByte, 0xDE.toByte, 0x5C.toByte, 0x38.toByte, 0x4D.toByte, 0xF7.toByte, 0xBA.toByte, 0x0B.toByte, 0x8D.toByte, 0x57.toByte, 0x8A.toByte, 0x4C.toByte, 0x70.toByte, 0x2B.toByte, 0x6B.toByte, 0xF1.toByte,
0x1D.toByte, 0x5F.toByte, 0xAC.toByte)
assert(txOutScriptExpected.deep == txOutScript(0).get(0).asInstanceOf[Array[Byte]].deep)
import df.sparkSession.implicits._
df.as[BitcoinBlock].collect()
}
"The genesis block on DFS" should "be fully read in dataframe enriched with transactionHash" in {
Given("Genesis Block on DFSCluster")
// create input directory
dfsCluster.getFileSystem().delete(DFS_INPUT_DIR, true)
dfsCluster.getFileSystem().mkdirs(DFS_INPUT_DIR)
// copy bitcoin blocks
val classLoader = getClass.getClassLoader
// put testdata on DFS
val fileName: String = "genesis.blk"
val fileNameFullLocal = classLoader.getResource("testdata/" + fileName).getFile
val inputFile = new Path(fileNameFullLocal)
dfsCluster.getFileSystem().copyFromLocalFile(false, false, inputFile, DFS_INPUT_DIR)
When("reading Genesis block using datasource")
val df = spark.read.format("org.zuinnote.spark.bitcoin.block").option("magic", "F9BEB4D9").option("enrich", "true").load(dfsCluster.getFileSystem().getUri.toString + DFS_INPUT_DIR_NAME)
Then("all fields should be readable trough Spark SQL")
// check first if structure is correct
assert("blockSize" == df.columns(0))
assert("magicNo" == df.columns(1))
assert("version" == df.columns(2))
assert("time" == df.columns(3))
assert("bits" == df.columns(4))
assert("nonce" == df.columns(5))
assert("transactionCounter" == df.columns(6))
assert("hashPrevBlock" == df.columns(7))
assert("hashMerkleRoot" == df.columns(8))
assert("transactions" == df.columns(9))
// validate block data
val blockSize = df.select("blockSize").collect
assert(285 == blockSize(0).getLong(0))
val magicNo = df.select("magicNo").collect
val magicNoExpected: Array[Byte] = Array(0xF9.toByte, 0xBE.toByte, 0xB4.toByte, 0xD9.toByte)
assert(magicNoExpected.deep == magicNo(0).get(0).asInstanceOf[Array[Byte]].deep)
val version = df.select("version").collect
assert(1 == version(0).getLong(0))
val time = df.select("time").collect
assert(1231006505 == time(0).getLong(0))
val bits = df.select("bits").collect
val bitsExpected: Array[Byte] = Array(0xFF.toByte, 0xFF.toByte, 0x00.toByte, 0x1D.toByte)
assert(bitsExpected.deep == bits(0).get(0).asInstanceOf[Array[Byte]].deep)
val nonce = df.select("nonce").collect
assert(2083236893 == nonce(0).getLong(0))
val transactionCounter = df.select("transactionCounter").collect
assert(1 == transactionCounter(0).getLong(0))
val hashPrevBlock = df.select("hashPrevBlock").collect
val hashPrevBlockExpected: Array[Byte] = Array(0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte)
assert(hashPrevBlockExpected.deep == hashPrevBlock(0).get(0).asInstanceOf[Array[Byte]].deep)
val hashMerkleRoot = df.select("hashMerkleRoot").collect
val hashMerkleRootExpected: Array[Byte] = Array(0x3B.toByte, 0xA3.toByte, 0xED.toByte, 0xFD.toByte, 0x7A.toByte, 0x7B.toByte, 0x12.toByte, 0xB2.toByte, 0x7A.toByte, 0xC7.toByte, 0x2C.toByte, 0x3E.toByte, 0x67.toByte, 0x76.toByte, 0x8F.toByte, 0x61.toByte, 0x7F.toByte,
0xC8.toByte, 0x1B.toByte, 0xC3.toByte, 0x88.toByte, 0x8A.toByte, 0x51.toByte, 0x32.toByte, 0x3A.toByte, 0x9F.toByte, 0xB8.toByte, 0xAA.toByte, 0x4B.toByte, 0x1E.toByte, 0x5E.toByte, 0x4A.toByte)
assert(hashMerkleRootExpected.deep == hashMerkleRoot(0).get(0).asInstanceOf[Array[Byte]].deep)
// validate transactions
val transactionsDF = df.select(explode(df("transactions")).alias("transactions"))
// one transaction
val transactionsDFCount = transactionsDF.count
assert(1 == transactionsDFCount)
val currentTransactionHash = transactionsDF.select("transactions.currentTransactionHash").collect
val currentTransactionHashExpected: Array[Byte] = Array(0x3B.toByte, 0xA3.toByte, 0xED.toByte, 0xFD.toByte, 0x7A.toByte, 0x7B.toByte, 0x12.toByte, 0xB2.toByte, 0x7A.toByte, 0xC7.toByte, 0x2C.toByte, 0x3E.toByte, 0x67.toByte, 0x76.toByte, 0x8F.toByte, 0x61.toByte,
0x7F.toByte, 0xC8.toByte, 0x1B.toByte, 0xC3.toByte, 0x88.toByte, 0x8A.toByte, 0x51.toByte, 0x32.toByte, 0x3A.toByte, 0x9F.toByte, 0xB8.toByte, 0xAA.toByte, 0x4B.toByte, 0x1E.toByte, 0x5E.toByte, 0x4A.toByte)
assert(currentTransactionHashExpected.deep == currentTransactionHash(0).get(0).asInstanceOf[Array[Byte]].deep)
val transactionsVersion = transactionsDF.select("transactions.version").collect
assert(1 == transactionsVersion(0).getLong(0))
val inCounter = transactionsDF.select("transactions.inCounter").collect
val inCounterExpected: Array[Byte] = Array(0x01.toByte)
assert(inCounterExpected.deep == inCounter(0).get(0).asInstanceOf[Array[Byte]].deep)
val outCounter = transactionsDF.select("transactions.outCounter").collect
val outCounterExpected: Array[Byte] = Array(0x01.toByte)
assert(outCounterExpected.deep == outCounter(0).get(0).asInstanceOf[Array[Byte]].deep)
val transactionsLockTime = transactionsDF.select("transactions.lockTime").collect
assert(0 == transactionsLockTime(0).getLong(0))
val transactionsLOIDF = transactionsDF.select(explode(transactionsDF("transactions.listOfInputs")).alias("listOfInputs"))
val prevTransactionHash = transactionsLOIDF.select("listOfInputs.prevTransactionHash").collect
val prevTransactionHashExpected: Array[Byte] = Array(0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte)
assert(prevTransactionHashExpected.deep == prevTransactionHash(0).get(0).asInstanceOf[Array[Byte]].deep)
val previousTxOutIndex = transactionsLOIDF.select("listOfInputs.previousTxOutIndex").collect
assert(4294967295L == previousTxOutIndex(0).getLong(0))
val txInScriptLength = transactionsLOIDF.select("listOfInputs.txInScriptLength").collect
val txInScriptLengthExpected: Array[Byte] = Array(0x4D.toByte)
assert(txInScriptLengthExpected.deep == txInScriptLength(0).get(0).asInstanceOf[Array[Byte]].deep)
val txInScript = transactionsLOIDF.select("listOfInputs.txInScript").collect
val txInScriptExpected: Array[Byte] = Array(0x04.toByte, 0xFF.toByte, 0xFF.toByte, 0x00.toByte, 0x1D.toByte, 0x01.toByte, 0x04.toByte, 0x45.toByte, 0x54.toByte, 0x68.toByte, 0x65.toByte, 0x20.toByte, 0x54.toByte, 0x69.toByte, 0x6D.toByte, 0x65.toByte,
0x73.toByte, 0x20.toByte, 0x30.toByte, 0x33.toByte, 0x2F.toByte, 0x4A.toByte, 0x61.toByte, 0x6E.toByte, 0x2F.toByte, 0x32.toByte, 0x30.toByte, 0x30.toByte, 0x39.toByte, 0x20.toByte, 0x43.toByte, 0x68.toByte,
0x61.toByte, 0x6E.toByte, 0x63.toByte, 0x65.toByte, 0x6C.toByte, 0x6C.toByte, 0x6F.toByte, 0x72.toByte, 0x20.toByte, 0x6F.toByte, 0x6E.toByte, 0x20.toByte, 0x62.toByte, 0x72.toByte, 0x69.toByte, 0x6E.toByte, 0x6B.toByte,
0x20.toByte, 0x6F.toByte, 0x66.toByte, 0x20.toByte, 0x73.toByte, 0x65.toByte, 0x63.toByte, 0x6F.toByte, 0x6E.toByte, 0x64.toByte, 0x20.toByte, 0x62.toByte, 0x61.toByte, 0x69.toByte, 0x6C.toByte, 0x6F.toByte,
0x75.toByte, 0x74.toByte, 0x20.toByte, 0x66.toByte, 0x6F.toByte, 0x72.toByte, 0x20.toByte, 0x62.toByte, 0x61.toByte, 0x6E.toByte, 0x6B.toByte, 0x73.toByte)
assert(txInScriptExpected.deep == txInScript(0).get(0).asInstanceOf[Array[Byte]].deep)
val seqNo = transactionsLOIDF.select("listOfInputs.seqNo").collect
assert(4294967295L == seqNo(0).getLong(0))
val transactionsLOODF = transactionsDF.select(explode(transactionsDF("transactions.listOfOutputs")).alias("listOfOutputs"))
val value = transactionsLOODF.select("listOfOutputs.value").collect
assert(BigDecimal.valueOf(5000000000L).compareTo(value(0).getDecimal(0))==0)
val txOutScriptLength = transactionsLOODF.select("listOfOutputs.txOutScriptLength").collect
val txOutScriptLengthExpected: Array[Byte] = Array(0x43.toByte)
assert(txOutScriptLengthExpected.deep == txOutScriptLength(0).get(0).asInstanceOf[Array[Byte]].deep)
val txOutScript = transactionsLOODF.select("listOfOutputs.txOutScript").collect
val txOutScriptExpected: Array[Byte] = Array(0x41.toByte, 0x04.toByte, 0x67.toByte, 0x8A.toByte, 0xFD.toByte, 0xB0.toByte, 0xFE.toByte, 0x55.toByte, 0x48.toByte, 0x27.toByte, 0x19.toByte, 0x67.toByte, 0xF1.toByte, 0xA6.toByte, 0x71.toByte, 0x30.toByte,
0xB7.toByte, 0x10.toByte, 0x5C.toByte, 0xD6.toByte, 0xA8.toByte, 0x28.toByte, 0xE0.toByte, 0x39.toByte, 0x09.toByte, 0xA6.toByte, 0x79.toByte, 0x62.toByte, 0xE0.toByte, 0xEA.toByte, 0x1F.toByte, 0x61.toByte,
0xDE.toByte, 0xB6.toByte, 0x49.toByte, 0xF6.toByte, 0xBC.toByte, 0x3F.toByte, 0x4C.toByte, 0xEF.toByte, 0x38.toByte, 0xC4.toByte, 0xF3.toByte, 0x55.toByte, 0x04.toByte, 0xE5.toByte, 0x1E.toByte, 0xC1.toByte,
0x12.toByte, 0xDE.toByte, 0x5C.toByte, 0x38.toByte, 0x4D.toByte, 0xF7.toByte, 0xBA.toByte, 0x0B.toByte, 0x8D.toByte, 0x57.toByte, 0x8A.toByte, 0x4C.toByte, 0x70.toByte, 0x2B.toByte, 0x6B.toByte, 0xF1.toByte,
0x1D.toByte, 0x5F.toByte, 0xAC.toByte)
assert(txOutScriptExpected.deep == txOutScript(0).get(0).asInstanceOf[Array[Byte]].deep)
}
"The genesis block on DFS" should "be fully read in dataframe enriched with transactionHash (rich datatypes)" in {
Given("Genesis Block on DFSCluster")
// create input directory
dfsCluster.getFileSystem().delete(DFS_INPUT_DIR, true)
dfsCluster.getFileSystem().mkdirs(DFS_INPUT_DIR)
// copy bitcoin blocks
val classLoader = getClass.getClassLoader
// put testdata on DFS
val fileName: String = "genesis.blk"
val fileNameFullLocal = classLoader.getResource("testdata/" + fileName).getFile
val inputFile = new Path(fileNameFullLocal)
dfsCluster.getFileSystem().copyFromLocalFile(false, false, inputFile, DFS_INPUT_DIR)
When("reading Genesis block using datasource")
val df = spark.read.format("org.zuinnote.spark.bitcoin.block").option("magic", "F9BEB4D9").option("enrich", "true").load(dfsCluster.getFileSystem().getUri.toString + DFS_INPUT_DIR_NAME)
Then("all fields should be readable trough Spark SQL")
// check first if structure is correct
assert("blockSize" == df.columns(0))
assert("magicNo" == df.columns(1))
assert("version" == df.columns(2))
assert("time" == df.columns(3))
assert("bits" == df.columns(4))
assert("nonce" == df.columns(5))
assert("transactionCounter" == df.columns(6))
assert("hashPrevBlock" == df.columns(7))
assert("hashMerkleRoot" == df.columns(8))
assert("transactions" == df.columns(9))
// validate block data
val blockSize = df.select("blockSize").collect
assert(285 == blockSize(0).getLong(0))
val magicNo = df.select("magicNo").collect
val magicNoExpected: Array[Byte] = Array(0xF9.toByte, 0xBE.toByte, 0xB4.toByte, 0xD9.toByte)
assert(magicNoExpected.deep == magicNo(0).get(0).asInstanceOf[Array[Byte]].deep)
val version = df.select("version").collect
assert(1 == version(0).getLong(0))
val time = df.select("time").collect
assert(1231006505 == time(0).getLong(0))
val bits = df.select("bits").collect
val bitsExpected: Array[Byte] = Array(0xFF.toByte, 0xFF.toByte, 0x00.toByte, 0x1D.toByte)
assert(bitsExpected.deep == bits(0).get(0).asInstanceOf[Array[Byte]].deep)
val nonce = df.select("nonce").collect
assert(2083236893 == nonce(0).getLong(0))
val transactionCounter = df.select("transactionCounter").collect
assert(1 == transactionCounter(0).getLong(0))
val hashPrevBlock = df.select("hashPrevBlock").collect
val hashPrevBlockExpected: Array[Byte] = Array(0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte)
assert(hashPrevBlockExpected.deep == hashPrevBlock(0).get(0).asInstanceOf[Array[Byte]].deep)
val hashMerkleRoot = df.select("hashMerkleRoot").collect
val hashMerkleRootExpected: Array[Byte] = Array(0x3B.toByte, 0xA3.toByte, 0xED.toByte, 0xFD.toByte, 0x7A.toByte, 0x7B.toByte, 0x12.toByte, 0xB2.toByte, 0x7A.toByte, 0xC7.toByte, 0x2C.toByte, 0x3E.toByte, 0x67.toByte, 0x76.toByte, 0x8F.toByte, 0x61.toByte, 0x7F.toByte,
0xC8.toByte, 0x1B.toByte, 0xC3.toByte, 0x88.toByte, 0x8A.toByte, 0x51.toByte, 0x32.toByte, 0x3A.toByte, 0x9F.toByte, 0xB8.toByte, 0xAA.toByte, 0x4B.toByte, 0x1E.toByte, 0x5E.toByte, 0x4A.toByte)
assert(hashMerkleRootExpected.deep == hashMerkleRoot(0).get(0).asInstanceOf[Array[Byte]].deep)
// validate transactions
val transactionsDF = df.select(explode(df("transactions")).alias("transactions"))
// one transaction
val transactionsDFCount = transactionsDF.count
assert(1 == transactionsDFCount)
val currentTransactionHash = transactionsDF.select("transactions.currentTransactionHash").collect
val currentTransactionHashExpected: Array[Byte] = Array(0x3B.toByte, 0xA3.toByte, 0xED.toByte, 0xFD.toByte, 0x7A.toByte, 0x7B.toByte, 0x12.toByte, 0xB2.toByte, 0x7A.toByte, 0xC7.toByte, 0x2C.toByte, 0x3E.toByte, 0x67.toByte, 0x76.toByte, 0x8F.toByte, 0x61.toByte,
0x7F.toByte, 0xC8.toByte, 0x1B.toByte, 0xC3.toByte, 0x88.toByte, 0x8A.toByte, 0x51.toByte, 0x32.toByte, 0x3A.toByte, 0x9F.toByte, 0xB8.toByte, 0xAA.toByte, 0x4B.toByte, 0x1E.toByte, 0x5E.toByte, 0x4A.toByte)
assert(currentTransactionHashExpected.deep == currentTransactionHash(0).get(0).asInstanceOf[Array[Byte]].deep)
val transactionsVersion = transactionsDF.select("transactions.version").collect
assert(1 == transactionsVersion(0).getLong(0))
val inCounter = transactionsDF.select("transactions.inCounter").collect
val inCounterExpected: Array[Byte] = Array(0x01.toByte)
assert(inCounterExpected.deep == inCounter(0).get(0).asInstanceOf[Array[Byte]].deep)
val outCounter = transactionsDF.select("transactions.outCounter").collect
val outCounterExpected: Array[Byte] = Array(0x01.toByte)
assert(outCounterExpected.deep == outCounter(0).get(0).asInstanceOf[Array[Byte]].deep)
val transactionsLockTime = transactionsDF.select("transactions.lockTime").collect
assert(0 == transactionsLockTime(0).getLong(0))
val transactionsLOIDF = transactionsDF.select(explode(transactionsDF("transactions.listOfInputs")).alias("listOfInputs"))
val prevTransactionHash = transactionsLOIDF.select("listOfInputs.prevTransactionHash").collect
val prevTransactionHashExpected: Array[Byte] = Array(0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte, 0x00.toByte)
assert(prevTransactionHashExpected.deep == prevTransactionHash(0).get(0).asInstanceOf[Array[Byte]].deep)
val previousTxOutIndex = transactionsLOIDF.select("listOfInputs.previousTxOutIndex").collect
assert(4294967295L == previousTxOutIndex(0).getLong(0))
val txInScriptLength = transactionsLOIDF.select("listOfInputs.txInScriptLength").collect
val txInScriptLengthExpected: Array[Byte] = Array(0x4D.toByte)
assert(txInScriptLengthExpected.deep == txInScriptLength(0).get(0).asInstanceOf[Array[Byte]].deep)
val txInScript = transactionsLOIDF.select("listOfInputs.txInScript").collect
val txInScriptExpected: Array[Byte] = Array(0x04.toByte, 0xFF.toByte, 0xFF.toByte, 0x00.toByte, 0x1D.toByte, 0x01.toByte, 0x04.toByte, 0x45.toByte, 0x54.toByte, 0x68.toByte, 0x65.toByte, 0x20.toByte, 0x54.toByte, 0x69.toByte, 0x6D.toByte, 0x65.toByte,
0x73.toByte, 0x20.toByte, 0x30.toByte, 0x33.toByte, 0x2F.toByte, 0x4A.toByte, 0x61.toByte, 0x6E.toByte, 0x2F.toByte, 0x32.toByte, 0x30.toByte, 0x30.toByte, 0x39.toByte, 0x20.toByte, 0x43.toByte, 0x68.toByte,
0x61.toByte, 0x6E.toByte, 0x63.toByte, 0x65.toByte, 0x6C.toByte, 0x6C.toByte, 0x6F.toByte, 0x72.toByte, 0x20.toByte, 0x6F.toByte, 0x6E.toByte, 0x20.toByte, 0x62.toByte, 0x72.toByte, 0x69.toByte, 0x6E.toByte, 0x6B.toByte,
0x20.toByte, 0x6F.toByte, 0x66.toByte, 0x20.toByte, 0x73.toByte, 0x65.toByte, 0x63.toByte, 0x6F.toByte, 0x6E.toByte, 0x64.toByte, 0x20.toByte, 0x62.toByte, 0x61.toByte, 0x69.toByte, 0x6C.toByte, 0x6F.toByte,
0x75.toByte, 0x74.toByte, 0x20.toByte, 0x66.toByte, 0x6F.toByte, 0x72.toByte, 0x20.toByte, 0x62.toByte, 0x61.toByte, 0x6E.toByte, 0x6B.toByte, 0x73.toByte)
assert(txInScriptExpected.deep == txInScript(0).get(0).asInstanceOf[Array[Byte]].deep)
val seqNo = transactionsLOIDF.select("listOfInputs.seqNo").collect
assert(4294967295L == seqNo(0).getLong(0))
val transactionsLOODF = transactionsDF.select(explode(transactionsDF("transactions.listOfOutputs")).alias("listOfOutputs"))
val value = transactionsLOODF.select("listOfOutputs.value").collect
assert(BigDecimal.valueOf(5000000000L).compareTo(value(0).getDecimal(0))==0)
val txOutScriptLength = transactionsLOODF.select("listOfOutputs.txOutScriptLength").collect
val txOutScriptLengthExpected: Array[Byte] = Array(0x43.toByte)
assert(txOutScriptLengthExpected.deep == txOutScriptLength(0).get(0).asInstanceOf[Array[Byte]].deep)
val txOutScript = transactionsLOODF.select("listOfOutputs.txOutScript").collect
val txOutScriptExpected: Array[Byte] = Array(0x41.toByte, 0x04.toByte, 0x67.toByte, 0x8A.toByte, 0xFD.toByte, 0xB0.toByte, 0xFE.toByte, 0x55.toByte, 0x48.toByte, 0x27.toByte, 0x19.toByte, 0x67.toByte, 0xF1.toByte, 0xA6.toByte, 0x71.toByte, 0x30.toByte,
0xB7.toByte, 0x10.toByte, 0x5C.toByte, 0xD6.toByte, 0xA8.toByte, 0x28.toByte, 0xE0.toByte, 0x39.toByte, 0x09.toByte, 0xA6.toByte, 0x79.toByte, 0x62.toByte, 0xE0.toByte, 0xEA.toByte, 0x1F.toByte, 0x61.toByte,
0xDE.toByte, 0xB6.toByte, 0x49.toByte, 0xF6.toByte, 0xBC.toByte, 0x3F.toByte, 0x4C.toByte, 0xEF.toByte, 0x38.toByte, 0xC4.toByte, 0xF3.toByte, 0x55.toByte, 0x04.toByte, 0xE5.toByte, 0x1E.toByte, 0xC1.toByte,
0x12.toByte, 0xDE.toByte, 0x5C.toByte, 0x38.toByte, 0x4D.toByte, 0xF7.toByte, 0xBA.toByte, 0x0B.toByte, 0x8D.toByte, 0x57.toByte, 0x8A.toByte, 0x4C.toByte, 0x70.toByte, 0x2B.toByte, 0x6B.toByte, 0xF1.toByte,
0x1D.toByte, 0x5F.toByte, 0xAC.toByte)
assert(txOutScriptExpected.deep == txOutScript(0).get(0).asInstanceOf[Array[Byte]].deep)
import df.sparkSession.implicits._
df.as[EnrichedBitcoinBlock].collect()
}
"The scriptwitness block on DFS" should "be read in dataframe" in {
Given("Scriptwitness Block on DFSCluster")
// create input directory
dfsCluster.getFileSystem().delete(DFS_INPUT_DIR, true)
dfsCluster.getFileSystem().mkdirs(DFS_INPUT_DIR)
// copy bitcoin blocks
val classLoader = getClass.getClassLoader
// put testdata on DFS
val fileName: String = "scriptwitness.blk"
val fileNameFullLocal = classLoader.getResource("testdata/" + fileName).getFile
val inputFile = new Path(fileNameFullLocal)
dfsCluster.getFileSystem().copyFromLocalFile(false, false, inputFile, DFS_INPUT_DIR)
When("reading scriptwitness block using datasource")
val df = spark.read.format("org.zuinnote.spark.bitcoin.block").option("magic", "F9BEB4D9").load(dfsCluster.getFileSystem().getUri.toString + DFS_INPUT_DIR_NAME)
Then("schema should be correct and number of transactions")
// check first if structure is correct
assert("blockSize" == df.columns(0))
assert("magicNo" == df.columns(1))
assert("version" == df.columns(2))
assert("time" == df.columns(3))
assert("bits" == df.columns(4))
assert("nonce" == df.columns(5))
assert("transactionCounter" == df.columns(6))
assert("hashPrevBlock" == df.columns(7))
assert("hashMerkleRoot" == df.columns(8))
assert("transactions" == df.columns(9))
// validate block data
val blockSize = df.select("blockSize").collect
assert(999275 == blockSize(0).getLong(0))
val magicNo = df.select("magicNo").collect
val magicNoExpected: Array[Byte] = Array(0xF9.toByte, 0xBE.toByte, 0xB4.toByte, 0xD9.toByte)
assert(magicNoExpected.deep == magicNo(0).get(0).asInstanceOf[Array[Byte]].deep)
val version = df.select("version").collect
assert(536870914 == version(0).getLong(0))
val time = df.select("time").collect
assert(1503889880 == time(0).getLong(0))
val bits = df.select("bits").collect
val bitsExpected: Array[Byte] = Array(0xE9.toByte, 0x3C.toByte, 0x01.toByte, 0x18.toByte)
assert(bitsExpected.deep == bits(0).get(0).asInstanceOf[Array[Byte]].deep)
val nonce = df.select("nonce").collect
assert(184429655 == nonce(0).getLong(0))
val transactionCounter = df.select("transactionCounter").collect
assert(470 == transactionCounter(0).getLong(0))
// validate transactions
val transactionsDF = df.select(explode(df("transactions")).alias("transactions"))
val transactionsDFCount = transactionsDF.count
assert(470 == transactionsDFCount)
}
"The scriptwitness2 block on DFS" should "be read in dataframe" in {
Given("Scriptwitness2 Block on DFSCluster")
// create input directory
dfsCluster.getFileSystem().delete(DFS_INPUT_DIR, true)
dfsCluster.getFileSystem().mkdirs(DFS_INPUT_DIR)
// copy bitcoin blocks
val classLoader = getClass.getClassLoader
// put testdata on DFS
val fileName: String = "scriptwitness2.blk"
val fileNameFullLocal = classLoader.getResource("testdata/" + fileName).getFile
val inputFile = new Path(fileNameFullLocal)
dfsCluster.getFileSystem().copyFromLocalFile(false, false, inputFile, DFS_INPUT_DIR)
When("reading scriptwitness2 block using datasource")
val df = spark.read.format("org.zuinnote.spark.bitcoin.block").option("magic", "F9BEB4D9").load(dfsCluster.getFileSystem().getUri.toString + DFS_INPUT_DIR_NAME)
Then("schema should be correct and number of transactions")
// check first if structure is correct
assert("blockSize" == df.columns(0))
assert("magicNo" == df.columns(1))
assert("version" == df.columns(2))
assert("time" == df.columns(3))
assert("bits" == df.columns(4))
assert("nonce" == df.columns(5))
assert("transactionCounter" == df.columns(6))
assert("hashPrevBlock" == df.columns(7))
assert("hashMerkleRoot" == df.columns(8))
assert("transactions" == df.columns(9))
// read data
val blockSize = df.select("blockSize").collect
val magicNo = df.select("magicNo").collect
val magicNoExpected: Array[Byte] = Array(0xF9.toByte, 0xBE.toByte, 0xB4.toByte, 0xD9.toByte)
val time = df.select("time").collect
val version = df.select("version").collect
val bits = df.select("bits").collect
val bitsExpected: Array[Byte] = Array(0xE9.toByte, 0x3C.toByte, 0x01.toByte, 0x18.toByte)
val transactionCounter = df.select("transactionCounter").collect
val nonce = df.select("nonce").collect
// first block
// validate block data
assert(1000031 == blockSize(0).getLong(0))
assert(magicNoExpected.deep == magicNo(0).get(0).asInstanceOf[Array[Byte]].deep)
assert(536870912 == version(0).getLong(0))
assert(1503863706 == time(0).getLong(0))
assert(bitsExpected.deep == bits(0).get(0).asInstanceOf[Array[Byte]].deep)
assert(-706531299 == nonce(0).getLong(0))
assert(2191 == transactionCounter(0).getLong(0))
// second block
// validate block data
assert(999304 == blockSize(1).getLong(0))
assert(magicNoExpected.deep == magicNo(1).get(0).asInstanceOf[Array[Byte]].deep)
assert(536870912 == version(1).getLong(0))
assert(1503836377 == time(1).getLong(0))
assert(bitsExpected.deep == bits(1).get(0).asInstanceOf[Array[Byte]].deep)
assert(-566627396 == nonce(1).getLong(0))
assert(2508 == transactionCounter(1).getLong(0))
// check transactions
val transactionsDF = df.select(explode(df("transactions")).alias("transactions"))
val transactionsDFCount = transactionsDF.count
val transActBothBlocks = 2191 + 2508
assert(transActBothBlocks == transactionsDFCount)
}
"The Namecoin block on DFS with AuxPOW information" should "be read in dataframe" in {
Given("Namecoin Block on DFSCluster")
// create input directory
dfsCluster.getFileSystem().delete(DFS_INPUT_DIR, true)
dfsCluster.getFileSystem().mkdirs(DFS_INPUT_DIR)
// copy bitcoin blocks
val classLoader = getClass.getClassLoader
// put testdata on DFS
val fileName: String = "namecointhreedifferentopinoneblock.blk"
val fileNameFullLocal = classLoader.getResource("testdata/" + fileName).getFile
val inputFile = new Path(fileNameFullLocal)
dfsCluster.getFileSystem().copyFromLocalFile(false, false, inputFile, DFS_INPUT_DIR)
When("reading scriptwitness block using datasource")
val df = spark.read.format("org.zuinnote.spark.bitcoin.block").option("magic", "F9BEB4FE").option("readAuxPOW", "true").load(dfsCluster.getFileSystem().getUri.toString + DFS_INPUT_DIR_NAME)
Then("schema should be correct and number of transactions")
// check first if structure is correct
assert("blockSize" == df.columns(0))
assert("magicNo" == df.columns(1))
assert("version" == df.columns(2))
assert("time" == df.columns(3))
assert("bits" == df.columns(4))
assert("nonce" == df.columns(5))
assert("transactionCounter" == df.columns(6))
assert("hashPrevBlock" == df.columns(7))
assert("hashMerkleRoot" == df.columns(8))
assert("transactions" == df.columns(9))
assert("auxPOW" == df.columns(10))
// validate block data
val blockSize = df.select("blockSize").collect
assert(3125 == blockSize(0).getLong(0))
val magicNo = df.select("magicNo").collect
val magicNoExpected: Array[Byte] = Array(0xF9.toByte, 0xBE.toByte, 0xB4.toByte, 0xFE.toByte)
assert(magicNoExpected.deep == magicNo(0).get(0).asInstanceOf[Array[Byte]].deep)
val version = df.select("version").collect
assert(65796 == version(0).getLong(0))
val time = df.select("time").collect
assert(1506767051 == time(0).getLong(0))
val bits = df.select("bits").collect
val bitsExpected: Array[Byte] = Array(0x71.toByte, 0x63.toByte, 0x01.toByte, 0x18.toByte)
assert(bitsExpected.deep == bits(0).get(0).asInstanceOf[Array[Byte]].deep)
val nonce = df.select("nonce").collect
assert(0 == nonce(0).getLong(0))
val transactionCounter = df.select("transactionCounter").collect
assert(7 == transactionCounter(0).getLong(0))
// validate transactions
val transactionsDF = df.select(explode(df("transactions")).alias("transactions"))
val transactionsDFCount = transactionsDF.count
assert(7 == transactionsDFCount)
}
"The Namecoin block on DFS with AuxPOW information" should "be read in dataframe (with rich data types)" in {
Given("Namecoin Block on DFSCluster")
// create input directory
dfsCluster.getFileSystem().delete(DFS_INPUT_DIR, true)
dfsCluster.getFileSystem().mkdirs(DFS_INPUT_DIR)
// copy bitcoin blocks
val classLoader = getClass.getClassLoader
// put testdata on DFS
val fileName: String = "namecointhreedifferentopinoneblock.blk"
val fileNameFullLocal = classLoader.getResource("testdata/" + fileName).getFile
val inputFile = new Path(fileNameFullLocal)
dfsCluster.getFileSystem().copyFromLocalFile(false, false, inputFile, DFS_INPUT_DIR)
When("reading scriptwitness block using datasource")
val df = spark.read.format("org.zuinnote.spark.bitcoin.block").option("magic", "F9BEB4FE").option("readAuxPOW", "true").load(dfsCluster.getFileSystem().getUri.toString + DFS_INPUT_DIR_NAME)
Then("schema should be correct and number of transactions")
// check first if structure is correct
assert("blockSize" == df.columns(0))
assert("magicNo" == df.columns(1))
assert("version" == df.columns(2))
assert("time" == df.columns(3))
assert("bits" == df.columns(4))
assert("nonce" == df.columns(5))
assert("transactionCounter" == df.columns(6))
assert("hashPrevBlock" == df.columns(7))
assert("hashMerkleRoot" == df.columns(8))
assert("transactions" == df.columns(9))
assert("auxPOW" == df.columns(10))
// validate block data
val blockSize = df.select("blockSize").collect
assert(3125 == blockSize(0).getLong(0))
val magicNo = df.select("magicNo").collect
val magicNoExpected: Array[Byte] = Array(0xF9.toByte, 0xBE.toByte, 0xB4.toByte, 0xFE.toByte)
assert(magicNoExpected.deep == magicNo(0).get(0).asInstanceOf[Array[Byte]].deep)
val version = df.select("version").collect
assert(65796 == version(0).getLong(0))
val time = df.select("time").collect
assert(1506767051 == time(0).getLong(0))
val bits = df.select("bits").collect
val bitsExpected: Array[Byte] = Array(0x71.toByte, 0x63.toByte, 0x01.toByte, 0x18.toByte)
assert(bitsExpected.deep == bits(0).get(0).asInstanceOf[Array[Byte]].deep)
val nonce = df.select("nonce").collect
assert(0 == nonce(0).getLong(0))
val transactionCounter = df.select("transactionCounter").collect
assert(7 == transactionCounter(0).getLong(0))
// validate transactions
val transactionsDF = df.select(explode(df("transactions")).alias("transactions"))
val transactionsDFCount = transactionsDF.count
assert(7 == transactionsDFCount)
import df.sparkSession.implicits._
df.as[BitcoinBlockWithAuxPOW].collect()
}
"The Namecoin block on DFS with AuxPOW information" should "be read in dataframe with enrichment (incomplete test) with rich datatypes" in {
Given("Namecoin Block on DFSCluster")
// create input directory
dfsCluster.getFileSystem().delete(DFS_INPUT_DIR, true)
dfsCluster.getFileSystem().mkdirs(DFS_INPUT_DIR)
// copy bitcoin blocks
val classLoader = getClass.getClassLoader
// put testdata on DFS
val fileName: String = "namecointhreedifferentopinoneblock.blk"
val fileNameFullLocal = classLoader.getResource("testdata/" + fileName).getFile
val inputFile = new Path(fileNameFullLocal)
dfsCluster.getFileSystem().copyFromLocalFile(false, false, inputFile, DFS_INPUT_DIR)
When("reading scriptwitness block using datasource")
val df = spark.read.format("org.zuinnote.spark.bitcoin.block").option("magic", "F9BEB4FE").option("readAuxPOW", "true").option("enrich", "true").load(dfsCluster.getFileSystem().getUri.toString + DFS_INPUT_DIR_NAME)
Then("should be able to collect as dataset")
import df.sparkSession.implicits._
df.as[EnrichedBitcoinBlockWithAuxPOW].collect()
}
}
| ZuInnoTe/spark-hadoopcryptoledger-ds | src/it/scala/org/zuinnote/spark/bitcoin/block/SparkBitcoinBlockDSSparkMasterIntegrationSpec.scala | Scala | apache-2.0 | 52,301 |
package db.dao
import javax.inject.{Inject, Singleton}
import db.MyPostgresDriver
import db.Tables._
import play.api.db.slick.{DatabaseConfigProvider, HasDatabaseConfigProvider}
import scala.concurrent.{ExecutionContext, Future}
// TODO: Remove after beta
@Singleton
class PostgresBetaUserWhitelistDao @Inject() (protected val dbConfigProvider: DatabaseConfigProvider)(implicit ec: ExecutionContext)
extends BetaUserWhitelistDao with HasDatabaseConfigProvider[MyPostgresDriver] {
import _root_.db.TablesHelper._
import profile.api._
override def exists(email: String): Future[Boolean] = {
db.run(betaUserWhitelist.filter(m => m.email === email.trim).exists.result)
}
}
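/* Hypothetical call site (the Play results, email value and wiring below are invented purely for
 * illustration -- the DAO only answers whether an address is on the beta whitelist):
 *
 *   betaUserWhitelistDao.exists("user@example.com").map { whitelisted =>
 *     if (whitelisted) Ok("welcome to the beta") else Forbidden("not on the whitelist")
 *   }
 */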
| trifectalabs/roadquality | api/app/db/dao/PostgresBetaUserWhitelistDao.scala | Scala | bsd-3-clause | 689 |
package com.twitter.finatra.test
import com.twitter.finagle.stats.InMemoryStatsReceiver
import org.scalatest.matchers.should.Matchers
object StatTestUtils extends Matchers {
def clear(statsReceiver: InMemoryStatsReceiver): Unit = {
statsReceiver.counters.clear()
statsReceiver.stats.clear()
statsReceiver.gauges.clear()
}
def assertCounter(
statsReceiver: InMemoryStatsReceiver,
name: String,
expectedValue: Long
): Unit = {
val actualValue = statsReceiver.counters.getOrElse(Seq(name), 0L)
actualValue should equal(expectedValue)
}
def assertGauge(
statsReceiver: InMemoryStatsReceiver,
name: String,
expectedValue: Float
): Unit = {
val actualValue = statsReceiver.gauges.getOrElse(Seq(name), () => 0f)
if (expectedValue != actualValue()) {
println("Failure asserting " + name)
actualValue() should equal(expectedValue)
}
}
def printStatsAndCounters(statsReceiver: InMemoryStatsReceiver): Unit = {
def pretty(map: Iterator[(Seq[String], Any)]): Unit = {
for ((keys, value) <- map) {
println(keys.mkString("/") + " = " + value)
}
}
pretty(statsReceiver.stats.iterator)
pretty(statsReceiver.counters.iterator)
pretty(statsReceiver.gauges.iterator)
}
def pretty(map: Iterator[(Seq[String], Any)]): Unit = {
for ((keys, value) <- map) {
println(keys.mkString("/") + " = " + value)
}
}
def printStats(statsReceiver: InMemoryStatsReceiver): Unit = {
val stats = statsReceiver.stats.map {
case (keys, values) =>
keys.mkString("/") -> values.mkString(", ")
}.toSeq
val counters = statsReceiver.counters.map {
case (keys, valueInt) =>
keys.mkString("/") -> valueInt
}.toSeq
val gauges = statsReceiver.gauges.map {
case (keys, intFunc) =>
keys.mkString("/") -> intFunc()
}.toSeq
for ((key, value) <- (stats ++ counters ++ gauges).sortBy(_._1)) {
println("%-75s = %s".format(key, ellipses(value, 60)))
}
println()
}
private def ellipses(any: Any, max: Int) = {
val str = any.toString
if (str.length > max)
str.take(max) + "..."
else
str
}
}
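/* A minimal, hypothetical usage sketch (not part of the original utility); the receiver and the
 * counter name below are invented purely for illustration. */
object StatTestUtilsUsageExample {
  def main(args: Array[String]): Unit = {
    val statsReceiver = new InMemoryStatsReceiver
    statsReceiver.counter("requests").incr() // record a single request
    StatTestUtils.assertCounter(statsReceiver, "requests", expectedValue = 1L)
    StatTestUtils.printStats(statsReceiver) // dump stats, counters and gauges
    StatTestUtils.clear(statsReceiver) // reset state between test cases
  }
}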
| twitter/finatra | utils/src/test/scala/com/twitter/finatra/test/StatTestUtils.scala | Scala | apache-2.0 | 2,208 |
/* Copyright 2013 Nest Labs
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package nest.sparkle.time.server
import org.clapper.argot._
import org.clapper.argot.ArgotConverters._
import nest.sparkle.util.{ConfigUtil, SparkleApp}
/** Main launcher for Sparkle application */
object Main extends SparkleApp {
override def appName = "sparkle"
override def appVersion = "Version 0.6.0" // TODO: get from the build
val filesPath = parser.option[String](List("f", "files"), "path",
"load .csv/.tsv file, or directory containing .csv or .tsv files")
val watch = parser.option[String](List("w", "watch"), "path",
"load .csv/.tsv file, or directory containing .csv or .tsv files. Reload new or changed files.")
val erase = parser.flag[Boolean](List("format"), "erase and format the database")
val port = parser.option[Int](List("p", "port"), "port", "tcp port for web server")
val root = parser.option[String](List("root"), "path", "directory containing custom web pages to serve")
val display = parser.flag(List("display"), "navigate the desktop web browser to the current dashboard")
initialize()
val launch = SparkleAPIServer(rootConfig)
display.value.foreach { _ => launch.launchDesktopBrowser() }
override def overrides = {
val sparkleConfigName = ConfigUtil.sparkleConfigName
val portMapping = port.value.toList.flatMap { portNumber =>
Seq(
(s"$sparkleConfigName.port", portNumber),
(s"$sparkleConfigName.admin.port", portNumber + 1)
)
}
val rootMapping = root.value.toList.map { value => (s"$sparkleConfigName.web-root.directory", List(value)) }
val eraseOverride = erase.value.toList.map { (s"$sparkleConfigName.erase-store", _) }
val directories = filesPath.value orElse watch.value
val filesOverride = directories.toList.flatMap { path =>
val filesConfig = s"$sparkleConfigName.files-loader"
val doWatch = watch.value.isDefined
Seq(
(s"$filesConfig.directories", List(s"$path")),
(s"$filesConfig.watch-directories", doWatch),
(s"$filesConfig.auto-start", "true")
)
}
portMapping ::: rootMapping ::: eraseOverride ::: filesOverride
}
}
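/* Hypothetical invocations (the launcher name and paths are made up for illustration):
 *
 *   sparkle --files data/measurements.csv --port 1234 --display
 *   sparkle --watch /var/data/csv --root ./dashboard --format
 *
 * --files/--watch load .csv/.tsv data (--watch keeps reloading new or changed files), --port sets
 * the web server port (the admin port becomes port + 1), --root serves custom web pages,
 * --format erases and re-formats the store, and --display opens the dashboard in a browser.
 */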
| mighdoll/sparkle | protocol/src/main/scala/nest/sparkle/time/server/Main.scala | Scala | apache-2.0 | 2,708 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api
import _root_.java.util.concurrent.atomic.AtomicInteger
import org.apache.calcite.plan.RelOptUtil
import org.apache.calcite.plan.hep.HepMatchOrder
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.sql2rel.RelDecorrelator
import org.apache.calcite.tools.RuleSet
import org.apache.flink.api.common.functions.MapFunction
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.io.DiscardingOutputFormat
import org.apache.flink.api.java.typeutils.GenericTypeInfo
import org.apache.flink.api.java.{DataSet, ExecutionEnvironment}
import org.apache.flink.table.explain.PlanJsonParser
import org.apache.flink.table.expressions.{Expression, TimeAttribute}
import org.apache.flink.table.plan.nodes.FlinkConventions
import org.apache.flink.table.plan.nodes.dataset.DataSetRel
import org.apache.flink.table.plan.rules.FlinkRuleSets
import org.apache.flink.table.plan.schema.{DataSetTable, RowSchema, TableSourceTable}
import org.apache.flink.table.runtime.MapRunner
import org.apache.flink.table.sinks.{BatchTableSink, TableSink}
import org.apache.flink.table.sources.{BatchTableSource, TableSource}
import org.apache.flink.types.Row
/**
* The abstract base class for batch TableEnvironments.
*
* A TableEnvironment can be used to:
* - convert a [[DataSet]] to a [[Table]]
* - register a [[DataSet]] in the [[TableEnvironment]]'s catalog
* - register a [[Table]] in the [[TableEnvironment]]'s catalog
* - scan a registered table to obtain a [[Table]]
* - specify a SQL query on registered tables to obtain a [[Table]]
* - convert a [[Table]] into a [[DataSet]]
* - explain the AST and execution plan of a [[Table]]
*
* @param execEnv The [[ExecutionEnvironment]] which is wrapped in this [[BatchTableEnvironment]].
* @param config The [[TableConfig]] of this [[BatchTableEnvironment]].
*/
abstract class BatchTableEnvironment(
private[flink] val execEnv: ExecutionEnvironment,
config: TableConfig)
extends TableEnvironment(config) {
// a counter for unique table names.
private val nameCntr: AtomicInteger = new AtomicInteger(0)
// the naming pattern for internally registered tables.
private val internalNamePattern = "^_DataSetTable_[0-9]+$".r
/**
* Checks if the chosen table name is valid.
*
* @param name The table name to check.
*/
override protected def checkValidTableName(name: String): Unit = {
val m = internalNamePattern.findFirstIn(name)
m match {
case Some(_) =>
throw new TableException(s"Illegal Table name. " +
s"Please choose a name that does not contain the pattern $internalNamePattern")
case None =>
}
}
/** Returns a unique table name according to the internal naming pattern. */
protected def createUniqueTableName(): String = "_DataSetTable_" + nameCntr.getAndIncrement()
/**
* Registers an external [[BatchTableSource]] in this [[TableEnvironment]]'s catalog.
* Registered tables can be referenced in SQL queries.
*
* @param name The name under which the [[TableSource]] is registered.
* @param tableSource The [[TableSource]] to register.
*/
override def registerTableSource(name: String, tableSource: TableSource[_]): Unit = {
checkValidTableName(name)
tableSource match {
case batchTableSource: BatchTableSource[_] =>
registerTableInternal(name, new TableSourceTable(batchTableSource))
case _ =>
throw new TableException("Only BatchTableSource can be registered in " +
"BatchTableEnvironment")
}
}
/**
* Writes a [[Table]] to a [[TableSink]].
*
* Internally, the [[Table]] is translated into a [[DataSet]] and handed over to the
* [[TableSink]] to write it.
*
* @param table The [[Table]] to write.
* @param sink The [[TableSink]] to write the [[Table]] to.
* @param queryConfig The configuration for the query to generate.
* @tparam T The expected type of the [[DataSet]] which represents the [[Table]].
*/
override private[flink] def writeToSink[T](
table: Table,
sink: TableSink[T],
queryConfig: QueryConfig): Unit = {
// We do not pass the configuration on, because there is nothing to configure for batch queries.
queryConfig match {
case _: BatchQueryConfig =>
case _ =>
throw new TableException("BatchQueryConfig required to configure batch query.")
}
sink match {
case batchSink: BatchTableSink[T] =>
val outputType = sink.getOutputType
// translate the Table into a DataSet and provide the type that the TableSink expects.
val result: DataSet[T] = translate(table)(outputType)
// Give the DataSet to the TableSink to emit it.
batchSink.emitDataSet(result)
case _ =>
throw new TableException("BatchTableSink required to emit batch Table.")
}
}
/**
* Creates a final converter that maps the internal row type to external type.
*
* @param physicalTypeInfo the input of the sink
* @param schema the input schema with correct field names (esp. for POJO field mapping)
* @param requestedTypeInfo the output type of the sink
   * @param functionName name of the map function. Does not need to be unique but has to be a
* valid Java class identifier.
*/
protected def getConversionMapper[IN, OUT](
physicalTypeInfo: TypeInformation[IN],
schema: RowSchema,
requestedTypeInfo: TypeInformation[OUT],
functionName: String)
: Option[MapFunction[IN, OUT]] = {
val converterFunction = generateRowConverterFunction[OUT](
physicalTypeInfo.asInstanceOf[TypeInformation[Row]],
schema,
requestedTypeInfo,
functionName
)
// add a runner if we need conversion
converterFunction.map { func =>
new MapRunner[IN, OUT](
func.name,
func.code,
func.returnType)
}
}
/**
* Returns the AST of the specified Table API and SQL queries and the execution plan to compute
* the result of the given [[Table]].
*
* @param table The table for which the AST and execution plan will be returned.
* @param extended Flag to include detailed optimizer estimates.
*/
private[flink] def explain(table: Table, extended: Boolean): String = {
val ast = table.getRelNode
val optimizedPlan = optimize(ast)
val dataSet = translate[Row](optimizedPlan, ast.getRowType) (new GenericTypeInfo(classOf[Row]))
dataSet.output(new DiscardingOutputFormat[Row])
val env = dataSet.getExecutionEnvironment
    val jsonSqlPlan = env.getExecutionPlan
    val sqlPlan = PlanJsonParser.getSqlExecutionPlan(jsonSqlPlan, extended)
s"== Abstract Syntax Tree ==" +
System.lineSeparator +
s"${RelOptUtil.toString(ast)}" +
System.lineSeparator +
s"== Optimized Logical Plan ==" +
System.lineSeparator +
s"${RelOptUtil.toString(optimizedPlan)}" +
System.lineSeparator +
s"== Physical Execution Plan ==" +
System.lineSeparator +
s"$sqlPlan"
}
/**
* Returns the AST of the specified Table API and SQL queries and the execution plan to compute
* the result of the given [[Table]].
*
* @param table The table for which the AST and execution plan will be returned.
*/
def explain(table: Table): String = explain(table: Table, extended = false)
/**
* Registers a [[DataSet]] as a table under a given name in the [[TableEnvironment]]'s catalog.
*
* @param name The name under which the table is registered in the catalog.
* @param dataSet The [[DataSet]] to register as table in the catalog.
* @tparam T the type of the [[DataSet]].
*/
protected def registerDataSetInternal[T](name: String, dataSet: DataSet[T]): Unit = {
val (fieldNames, fieldIndexes) = getFieldInfo[T](dataSet.getType)
val dataSetTable = new DataSetTable[T](
dataSet,
fieldIndexes,
fieldNames
)
registerTableInternal(name, dataSetTable)
}
/**
* Registers a [[DataSet]] as a table under a given name with field names as specified by
* field expressions in the [[TableEnvironment]]'s catalog.
*
* @param name The name under which the table is registered in the catalog.
* @param dataSet The [[DataSet]] to register as table in the catalog.
* @param fields The field expressions to define the field names of the table.
* @tparam T The type of the [[DataSet]].
*/
protected def registerDataSetInternal[T](
name: String, dataSet: DataSet[T], fields: Array[Expression]): Unit = {
val (fieldNames, fieldIndexes) = getFieldInfo[T](
dataSet.getType,
fields)
if (fields.exists(_.isInstanceOf[TimeAttribute])) {
throw new ValidationException(
".rowtime and .proctime time indicators are not allowed in a batch environment.")
}
val dataSetTable = new DataSetTable[T](dataSet, fieldIndexes, fieldNames)
registerTableInternal(name, dataSetTable)
}
/**
* Returns the built-in normalization rules that are defined by the environment.
*/
protected def getBuiltInNormRuleSet: RuleSet = FlinkRuleSets.DATASET_NORM_RULES
/**
* Returns the built-in optimization rules that are defined by the environment.
*/
protected def getBuiltInPhysicalOptRuleSet: RuleSet = FlinkRuleSets.DATASET_OPT_RULES
/**
* Generates the optimized [[RelNode]] tree from the original relational node tree.
*
* @param relNode The original [[RelNode]] tree
* @return The optimized [[RelNode]] tree
*/
private[flink] def optimize(relNode: RelNode): RelNode = {
// 0. convert sub-queries before query decorrelation
val convSubQueryPlan = runHepPlanner(
HepMatchOrder.BOTTOM_UP, FlinkRuleSets.TABLE_SUBQUERY_RULES, relNode, relNode.getTraitSet)
// 0. convert table references
val fullRelNode = runHepPlanner(
HepMatchOrder.BOTTOM_UP,
FlinkRuleSets.TABLE_REF_RULES,
convSubQueryPlan,
relNode.getTraitSet)
// 1. decorrelate
val decorPlan = RelDecorrelator.decorrelateQuery(fullRelNode)
// 2. normalize the logical plan
val normRuleSet = getNormRuleSet
val normalizedPlan = if (normRuleSet.iterator().hasNext) {
runHepPlanner(HepMatchOrder.BOTTOM_UP, normRuleSet, decorPlan, decorPlan.getTraitSet)
} else {
decorPlan
}
// 3. optimize the logical Flink plan
val logicalOptRuleSet = getLogicalOptRuleSet
val logicalOutputProps = relNode.getTraitSet.replace(FlinkConventions.LOGICAL).simplify()
val logicalPlan = if (logicalOptRuleSet.iterator().hasNext) {
runVolcanoPlanner(logicalOptRuleSet, normalizedPlan, logicalOutputProps)
} else {
normalizedPlan
}
// 4. optimize the physical Flink plan
val physicalOptRuleSet = getPhysicalOptRuleSet
val physicalOutputProps = relNode.getTraitSet.replace(FlinkConventions.DATASET).simplify()
val physicalPlan = if (physicalOptRuleSet.iterator().hasNext) {
runVolcanoPlanner(physicalOptRuleSet, logicalPlan, physicalOutputProps)
} else {
logicalPlan
}
physicalPlan
}
/**
* Translates a [[Table]] into a [[DataSet]].
*
* The transformation involves optimizing the relational expression tree as defined by
* Table API calls and / or SQL queries and generating corresponding [[DataSet]] operators.
*
* @param table The root node of the relational expression tree.
* @param tpe The [[TypeInformation]] of the resulting [[DataSet]].
* @tparam A The type of the resulting [[DataSet]].
* @return The [[DataSet]] that corresponds to the translated [[Table]].
*/
protected def translate[A](table: Table)(implicit tpe: TypeInformation[A]): DataSet[A] = {
val relNode = table.getRelNode
val dataSetPlan = optimize(relNode)
translate(dataSetPlan, relNode.getRowType)
}
/**
* Translates a logical [[RelNode]] into a [[DataSet]]. Converts to target type if necessary.
*
* @param logicalPlan The root node of the relational expression tree.
* @param logicalType The row type of the result. Since the logicalPlan can lose the
* field naming during optimization we pass the row type separately.
* @param tpe The [[TypeInformation]] of the resulting [[DataSet]].
* @tparam A The type of the resulting [[DataSet]].
* @return The [[DataSet]] that corresponds to the translated [[Table]].
*/
protected def translate[A](
logicalPlan: RelNode,
logicalType: RelDataType)
(implicit tpe: TypeInformation[A]): DataSet[A] = {
TableEnvironment.validateType(tpe)
logicalPlan match {
case node: DataSetRel =>
val plan = node.translateToPlan(this)
val conversion =
getConversionMapper(
plan.getType,
new RowSchema(logicalType),
tpe,
"DataSetSinkConversion")
conversion match {
case None => plan.asInstanceOf[DataSet[A]] // no conversion necessary
case Some(mapFunction: MapFunction[Row, A]) =>
plan.map(mapFunction)
.returns(tpe)
.name(s"to: ${tpe.getTypeClass.getSimpleName}")
.asInstanceOf[DataSet[A]]
}
case _ =>
throw TableException("Cannot generate DataSet due to an invalid logical plan. " +
"This is a bug and should not happen. Please file an issue.")
}
}
}
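/* A minimal, hypothetical usage sketch against a concrete batch table environment. Names not
 * defined in this file (getTableEnvironment, scan, toDataSet, the table source instance) are
 * assumptions about the surrounding Flink Table API, not guarantees:
 *
 *   val env  = ExecutionEnvironment.getExecutionEnvironment
 *   val tEnv = TableEnvironment.getTableEnvironment(env)     // concrete BatchTableEnvironment
 *   tEnv.registerTableSource("orders", aBatchTableSource)    // only BatchTableSource is accepted
 *   val orders = tEnv.scan("orders")                         // obtain a Table from the catalog
 *   println(tEnv.explain(orders))                            // AST, optimized and physical plan
 *   orders.toDataSet[Row].print()                            // Table -> DataSet conversion
 */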
| zohar-mizrahi/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/BatchTableEnvironment.scala | Scala | apache-2.0 | 14,409 |
package controllers
import models._
import scala.collection.mutable.ListBuffer
import util.control.Breaks._
/**
* Created by manuel on 31.05.2016.
*/
object PaperStats {
def getStats(papers: List[Papers], papersService: PapersService, paperResultService: PaperResultService,
answerService: AnswerService, conferenceSettingsService: ConferenceSettingsService): List[PapersWithStats] = {
papers.map(p => {
var statsTotal : Map[Int,Int] = Map()
var statDetails : Map[String,Int] = Map()
var results = paperResultService.findByPaperId(p.id.get)
results = addMethodsAndAssumptionsResults(p.id.get,results, papersService, answerService, conferenceSettingsService)
results.foreach(r => {
statsTotal += (r.symbol -> (statsTotal.getOrElse(r.symbol,0)+1))
statDetails += (r.resultType+"-"+r.symbol -> (statDetails.getOrElse(r.resultType+"-"+r.symbol,0)+1))
})
new PapersWithStats(p.id,p.name,p.status,p.permutations,p.secret, statsTotal, statDetails)
})
}
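  // Shape of the computed maps, for reference (the symbol constants are assumed to be the
  // PaperResult.SYMBOL_* values): statsTotal counts results per symbol, e.g.
  // Map(SYMBOL_OK -> 3, SYMBOL_WARNING -> 1), while statDetails counts per
  // "<resultType>-<symbol>" key.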
def addMethodsAndAssumptionsResults(id:Int,results: List[PaperResult], papersService: PapersService, answerService: AnswerService,
conferenceSettingsService: ConferenceSettingsService) : List[PaperResult] = {
var allResults = results
val paper = papersService.findById(id).get
val m2aList = answerService.findByPaperId(paper.id.get)
val conferenceSettings = conferenceSettingsService.findAllByPaperId(paper.id.get,paper.conferenceId).to[ListBuffer]
m2aList.foreach(m2a => {
breakable {
conferenceSettings.zipWithIndex.foreach{case (confSetting,i) => {
if(confSetting.flag.get != ConferenceSettings.FLAG_IGNORE) {
if(m2a.method.toLowerCase() == confSetting.methodName.toLowerCase() &&
m2a.assumption.toLowerCase() == confSetting.assumptionName.toLowerCase()) {
var symbol = PaperResult.SYMBOL_ERROR
if(m2a.isRelated > 0.5 && m2a.isCheckedBefore > 0.5) {
symbol = PaperResult.SYMBOL_OK
} else if(m2a.isRelated > 0.5) {
symbol = PaperResult.SYMBOL_WARNING
              } else {
                // not related at all: keep the default SYMBOL_ERROR
              }
allResults = allResults:+ new PaperResult(Some(1L),id,PaperResult.TYPE_M2A,"","",symbol,"")
conferenceSettings.remove(i)
break
}
}
}}
}
})
conferenceSettings.foreach(confSetting => {
if(confSetting.flag.get != ConferenceSettings.FLAG_IGNORE){
var symbol = PaperResult.SYMBOL_ERROR
if(confSetting.flag.get==ConferenceSettings.FLAG_EXPECT) {
symbol = PaperResult.SYMBOL_WARNING
}
allResults = allResults:+ new PaperResult(Some(1L),id,PaperResult.TYPE_M2A,"","",symbol,"")
}
})
allResults
}
}
| manuelroesch/PaperValidator | app/controllers/PaperStats.scala | Scala | mit | 2,851 |
package funsets
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
/**
* This class is a test suite for the methods in object FunSets. To run
* the test suite, you can either:
* - run the "test" command in the SBT console
 * - right-click the file in eclipse and choose "Run As" - "JUnit Test"
*/
@RunWith(classOf[JUnitRunner])
class FunSetSuite extends FunSuite {
/**
* Link to the scaladoc - very clear and detailed tutorial of FunSuite
*
* http://doc.scalatest.org/1.9.1/index.html#org.scalatest.FunSuite
*
* Operators
* - test
* - ignore
* - pending
*/
/**
* Tests are written using the "test" operator and the "assert" method.
*/
// test("string take") {
// val message = "hello, world"
// assert(message.take(5) == "hello")
// }
/**
* For ScalaTest tests, there exists a special equality operator "===" that
* can be used inside "assert". If the assertion fails, the two values will
* be printed in the error message. Otherwise, when using "==", the test
* error message will only say "assertion failed", without showing the values.
*
* Try it out! Change the values so that the assertion fails, and look at the
* error message.
*/
// test("adding ints") {
// assert(1 + 2 === 3)
// }
import FunSets._
test("contains is implemented") {
assert(contains(x => true, 100))
}
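  // Added illustration (hedged, not part of the original assignment): FunSets represents a
  // set as its characteristic function, so any Int => Boolean predicate can be passed where
  // a set is expected (just like `x => true` in the test above). This assumes the usual
  // `type Set = Int => Boolean` definition in FunSets.
  test("a plain predicate can be used as a set (added example)") {
    val even: Int => Boolean = x => x % 2 == 0
    assert(contains(even, 4), "even contains 4")
    assert(!contains(even, 3), "even does not contain 3")
  }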
/**
* When writing tests, one would often like to re-use certain values for multiple
   * tests. For instance, we would like to create an Int-set and have multiple tests
* about it.
*
* Instead of copy-pasting the code for creating the set into every test, we can
* store it in the test class using a val:
*
* val s1 = singletonSet(1)
*
* However, what happens if the method "singletonSet" has a bug and crashes? Then
* the test methods are not even executed, because creating an instance of the
* test class fails!
*
* Therefore, we put the shared values into a separate trait (traits are like
* abstract classes), and create an instance inside each test method.
*
*/
trait TestSets {
val s1 = singletonSet(1)
val s2 = singletonSet(2)
val s3 = singletonSet(3)
val sRange0to20 = rangeSet(0, 20)
}
  /**
   * In the original template this test was disabled with "ignore" because the
   * method "singletonSet" was not yet implemented and the test would fail.
   *
   * Since "singletonSet" is implemented here, "ignore" has been replaced with
   * "test" and the test runs as part of the suite.
   */
test("singletonSet(1) contains 1") {
/**
* We create a new instance of the "TestSets" trait, this gives us access
* to the values "s1" to "s3".
*/
new TestSets {
/**
* The string argument of "assert" is a message that is printed in case
* the test fails. This helps identifying which assertion failed.
*/
assert(contains(s1, 1), "Singleton")
assert(!contains(s1, 4), "s1 doesn't contain 4")
}
}
test("union contains all elements of each set") {
new TestSets {
val s = union(s1, s2)
printSet("union s", s)
assert(contains(s, 1), "Union 1")
assert(contains(s, 2), "Union 2")
assert(!contains(s, 3), "Union 3")
}
}
test("rangeSet creates an inclusive integer range") {
new TestSets {
val s = rangeSet(4,6)
printSet("rangeSet", s)
assert(!contains(s, 3), "rangeSet(4,6) has no 3")
assert(contains(s, 4), "rangeSet(4,6) has 4")
assert(contains(s, 5), "rangeSet(4,6) has 5")
assert(contains(s, 6), "rangeSet(4,6) has 6")
assert(!contains(s, 7), "rangeSet(4,6) has no 7")
}
}
test("intersection tests") {
new TestSets {
val s31 = union(s3, s1)
val s13 = union(s1, s3)
printSet("intersection s31", s31)
printSet("intersection s13", s13)
val just2 = intersect(s2, sRange0to20)
val none = intersect(s1, rangeSet(-4,0))
val just3 = intersect(s3, intersect(s31, s13))
printSet("intersection just2", just2)
printSet("intersection none", none)
printSet("intersection just3", just3)
assert(!contains(just2, 1), "just2 has no 1")
assert(contains(just2, 2), "just2 has 2")
assert(!contains(just2, 3), "just2 has no 3")
assert(!contains(none, 1), "none has no 1")
assert(!contains(none, 2), "none has no 2")
assert(!contains(none, 3), "none has no 3")
assert(!contains(just3, 1), "just3 has no 1")
assert(!contains(just3, 2), "just3 has no 2")
assert(contains(just3, 3), "just3 has 3")
}
}
test("diff tests") {
new TestSets {
val range0to5 = rangeSet(0,5)
val range2to3 = rangeSet(2,3)
val no2no3 = diff(range0to5, range2to3)
val none = diff(range2to3, range0to5)
printSet("diff range0to5", range0to5)
printSet("diff range2to3", range2to3)
printSet("diff no2no3", no2no3)
printSet("diff none", none)
assert(!contains(no2no3, -1), "no2no3 has no -1")
assert(contains(no2no3, 0), "no2no3 has 0")
assert(contains(no2no3, 1), "no2no3 has 1")
assert(!contains(no2no3, 2), "no2no3 has no 2")
assert(!contains(no2no3, 3), "no2no3 has no 3")
assert(contains(no2no3, 4), "no2no3 has 4")
assert(contains(no2no3, 5), "no2no3 has 5")
assert(!contains(no2no3, 6), "no2no3 has no 6")
assert(!contains(none, -1), "none has no -1")
assert(!contains(none, 0), "none has no 0")
assert(!contains(none, 1), "none has no 1")
assert(!contains(none, 2), "none has no 2")
assert(!contains(none, 3), "none has no 3")
assert(!contains(none, 4), "none has no 4")
assert(!contains(none, 5), "none has no 5")
assert(!contains(none, 6), "none has no 6")
}
}
test("filter tests") {
new TestSets {
def isEven (x: Int): Boolean = ((x%2) == 0)
val evens = filter(rangeSet(10,16), isEven)
printSet("evens", evens)
assert(!contains(evens, 8), "evens doesn't have 8")
assert(!contains(evens, 9), "evens doesn't have 9")
assert( contains(evens, 10), "evens has 10")
assert(!contains(evens, 11), "evens doesn't have 11")
assert( contains(evens, 12), "evens has 12")
assert(!contains(evens, 13), "evens doesn't have 13")
assert( contains(evens, 14), "evens has 14")
assert(!contains(evens, 15), "evens doesn't have 15")
assert( contains(evens, 16), "evens has 16")
assert(!contains(evens, 17), "evens doesn't have 17")
assert(!contains(evens, 18), "evens doesn't have 18")
}
}
test("forall tests") {
new TestSets {
printSet("forall sRange0to20", sRange0to20)
assert( forall(sRange0to20, (x: Int) => (x < 30)), "forall sRange0to20 < 30")
assert(!forall(sRange0to20, (x: Int) => (x < 10)), "forall sRange0to20 not < 10")
}
}
test("exists tests") {
new TestSets {
printSet("exists sRange0to20", sRange0to20)
assert( exists(sRange0to20, (x: Int) => (x == 12)), "exists sRange0to20 = 12")
assert(!exists(sRange0to20, (x: Int) => (x == 21)), "exists sRange0to20 not = 21")
}
}
test("map tests") {
new TestSets {
val original = rangeSet(0,3)
val cubed = map(original, (x: Int) => x*x*x)
printSet("original", original)
printSet("cubed", cubed)
assert( contains(cubed, 8), "cubed has 8")
assert(!contains(cubed, 9), "cubed doesn't have 9")
}
}
}
| jeffreylloydbrown/classwork | FunctionalProgrammingWithScala/funsets/src/test/scala/funsets/FunSetSuite.scala | Scala | unlicense | 7,545 |
package com.lucidchart.open.cashy.controllers
import javax.inject.Inject
import com.lucidchart.open.cashy.request.AuthAction
import com.lucidchart.open.cashy.models.{AssetModel, Asset, FolderModel, Folder}
import com.lucidchart.open.cashy.views
import play.api.data._
import play.api.data.Forms._
import play.api.mvc.Action
import play.api.i18n.MessagesApi
case class SearchParams(
q: String
)
class SearchController @Inject() (val messagesApi: MessagesApi) extends AppController with play.api.i18n.I18nSupport {
import SearchController._
def search = AuthAction.authenticatedUser { implicit user =>
Action { implicit request =>
searchForm.bindFromRequest.fold(
formWithErrors => Ok(views.html.search.index(formWithErrors, Nil, None)),
data => {
val assets = AssetModel.search(data.q)
// Get the possible folder keys for the bucket
val bucketFolders: Map[String,List[String]] = assets.groupBy(_.bucket).map { case (bucket, assets) =>
(bucket, assets.map { asset =>
// Get all of the possible parent folders for an asset
parentPaths(asset.key)
}.flatten)
}.toMap
// Get the hidden folders for each bucket that start with a key
val hiddenFolders: List[Folder] = bucketFolders.map { case (bucket, folders) =>
FolderModel.findByKeys(bucket, folders).filter(_.hidden)
}.toList.flatten
// If an asset is inside of a hidden folder, mark it as hidden for search result purposes
val viewAssets = assets.map { asset =>
val hidden = hiddenFolders.exists(folder => folder.bucket == asset.bucket && asset.key.startsWith(folder.key))
asset.copy(hidden=hidden)
}
Ok(views.html.search.index(searchForm.bindFromRequest, viewAssets, Some(data.q)))
}
)
}
}
private def parentPaths(assetPath: String): List[String] = {
val folders = assetPath.split("/").dropRight(1)
folders.zipWithIndex.map { case (crumb,idx) =>
folders.take(idx+1).mkString("/")+"/"
}.toList
}
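  // Worked example (derived from the implementation above): for an asset key
  // "images/icons/logo.png", parentPaths returns List("images/", "images/icons/"),
  // i.e. every ancestor folder prefix with a trailing "/" and without the file name itself.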
}
object SearchController {
val searchForm = Form(
mapping(
"q" -> text.verifying("Enter a search term", x => x != "")
)(SearchParams.apply)(SearchParams.unapply)
)
}
| lucidsoftware/cashy | app/com/lucidchart/open/cashy/controllers/SearchController.scala | Scala | apache-2.0 | 2,312 |
package io.buoyant.namerd.iface
import com.twitter.finagle.http.{MediaType, Request, Response}
import com.twitter.finagle.{Dtab, Service}
import com.twitter.util.Future
import io.buoyant.admin.names.DelegateApiHandler
import io.buoyant.namer.{Delegator, NamespacedInterpreterConfig, RichActivity}
case class DelegatorConfig(
routerLabel: String,
namespace: String,
dtab: Dtab
)
class NamerdHandler(
interpreterConfigs: Seq[(String, NamespacedInterpreterConfig)],
namerdInterpreters: Map[String, Delegator]
) extends Service[Request, Response] {
override def apply(req: Request): Future[Response] = {
val delegatorConfigs: Seq[Future[DelegatorConfig]] = interpreterConfigs.flatMap {
case (key, config) =>
namerdInterpreters.get(key) match {
case Some(delegator) =>
val dtab = delegator.dtab.toFuture
Some(dtab.map(DelegatorConfig(key, config.namespace.getOrElse("default"), _)))
case None => None
}
}
val collectedConfigs: Future[Seq[DelegatorConfig]] = Future.collect { delegatorConfigs }
collectedConfigs.map(dashboardHtml)
}
private[this] def dashboardHtml(dtabs: Seq[DelegatorConfig]) = {
val rsp = Response()
rsp.contentType = MediaType.Html
rsp.contentString = s"""
<div class="container main">
<div class="row">
<h2>Namespaces</h2>
</div>
<div id="dtab-namespaces" class="row">
</div>
<div id="namerd-stats"></div>
</div>
<script id="dtab-data" type="application/json">${DelegateApiHandler.Codec.writeStr(dtabs)}</script>
"""
rsp
}
}
| denverwilliams/linkerd | interpreter/namerd/src/main/scala/io/buoyant/namerd/iface/NamerdHandler.scala | Scala | apache-2.0 | 1,636 |
package org.scalacheck.ops
import org.scalacheck.{Gen, Test}
import org.scalacheck.rng.Seed
import org.scalatest.freespec.AnyFreeSpec
class SeededGenSpec extends AnyFreeSpec {
private val it = classOf[SeededGen[_, _, _]].getSimpleName
private implicit val c: GenConfig = GenConfig(Seed(1))
private val p = Test.Parameters.default
def itShouldMakeTheSeedAvailableToTheGenerator[S, T <: SeededGen.Tag](
suffix: String,
buildSeed: Int => S,
buildGen: SeededGen.GenFn[S, T, S] => SeededGen[S, T, S]
): Unit = {
s"$it$suffix should make the given seed available to the generators" in {
val seed = buildSeed(1)
val seededGen: SeededGen[S, T, S] = buildGen { seed =>
Gen.const(seed)
}
val result = seededGen.instance(seed)
assertResult(seed)(result)
}
}
def itShouldBehaveLikeASeededGen[S, T <: SeededGen.Tag](
suffix: String,
buildSeed: Int => S,
buildGen: SeededGen.GenFn[S, T, String] => SeededGen[S, T, String]
): Unit = {
s"$it$suffix should generate the same values given the same seed" in {
val seed = buildSeed(2)
val seededGen = buildGen { _ =>
Gen.alphaNumStr
}
val gen1 = seededGen.gen(seed)
val gen2 = seededGen.gen(seed)
(gen1 == gen2).check(p) // compares 100 results to see if the generators are equivalent
}
s"$it$suffix should generate different values for different seeds" in {
val seed1 = buildSeed(3)
val seed2 = buildSeed(4)
val seededGen = buildGen { _ =>
Gen.alphaNumStr
}
val gen1 = seededGen.gen(seed1)
val gen2 = seededGen.gen(seed2)
(gen1 != gen2).check(p)
}
}
itShouldMakeTheSeedAvailableToTheGenerator[String, TestSeeded](
".Companion.gen",
_.toString,
TestSeeded.gen
)
itShouldMakeTheSeedAvailableToTheGenerator[UserId, UserId.Tag](
" using a custom seed type",
idx => UserId(s"test-user-$idx"),
SeededGen.seededWith[UserId].taggedWith[UserId.Tag].build
)
itShouldMakeTheSeedAvailableToTheGenerator[String, TestSeeded](
".seededWith[String].taggedWith[TestSeed]",
_.toString,
SeededGen.seededWith[String].taggedWith[TestSeeded].build
)
itShouldMakeTheSeedAvailableToTheGenerator[Long, TestSeeded](
".taggedWith[TestSeed].seededWith[Long]",
_.toLong,
SeededGen.taggedWith[TestSeeded].seededWith[Long].build
)
itShouldBehaveLikeASeededGen[String, TestSeeded](
".seededWith[String].taggedWith[TestSeed]",
_.toString,
SeededGen.seededWith[String].taggedWith[TestSeeded].build
)
itShouldBehaveLikeASeededGen[Long, TestSeeded](
".taggedWith[TestSeed].seededWith[Long]",
_.toLong,
SeededGen.taggedWith[TestSeeded].seededWith[Long].build
)
itShouldBehaveLikeASeededGen[String, TestSeeded](
".Companion.gen",
_.toString,
TestSeeded.gen
)
itShouldBehaveLikeASeededGen[UserId, UserId.Tag](
" using a custom seed type",
idx => UserId(s"test-user-$idx"),
SeededGen.seededWith[UserId].taggedWith[UserId.Tag].build
)
s"$it should pass the seed between seeded generators" in {
val seed = "test-seed1"
val seededGen1 = TestSeeded.gen { _ =>
Gen.identifier
}
val seededGen2 = TestSeeded.gen { implicit seed =>
for {
s <- seededGen1
id2 <- Gen.identifier
} yield s"$seed-$s-$id2"
}
val result = seededGen2.instance(seed)
assert(result.contains(seed))
}
s"$it.map should map the underlying generator" in {
val seed = "test-seed1"
val seededGen1 = TestSeeded.gen { implicit seed =>
for {
id <- Gen.identifier
} yield s"$seed-$id"
}
val prefix = "prefix"
val seededGen2 = seededGen1.andThen(_.map(s => s"$prefix-$s"))
val result = seededGen2.instance(seed)
assert(result.startsWith(s"$prefix-$seed-"))
}
private case class UserId(value: String)
private object UserId extends SeededGen.Companion[UserId](SeedExtractor.from(_.value)) {
sealed abstract class Tag extends SeededGen.Tag
}
private sealed abstract class TestSeeded extends SeededGen.Tag
private object TestSeeded extends SeededGen.TagCompanion[String, TestSeeded]
}
| jeffmay/scalacheck-ops | core/src/test/scala/org/scalacheck/ops/SeededGenSpec.scala | Scala | apache-2.0 | 4,206 |
package nestor
package api
import akka.actor._
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
import scala.concurrent.Future
import org.eligosource.eventsourced.core._
import domain.Person, Person._
final class PersonApi(
coll: CollReadOnly[Person],
processor: ActorRef
)(implicit system: ActorSystem) extends CrudApi[Person, Person.Data] {
def createForm = Person.Form create { doc ⇒ byDocument(doc).isEmpty }
def updateForm(person: Person) = Person.Form.update(person, doc ⇒
byDocument(doc).fold(true)(_.id == person.id)
)
// Consistent reads
def all = coll.all
val byId = coll.byId _
private def byDocument(document: String) = coll find (_.document == document)
// Updates
private implicit val timeout = Timeout(5 seconds)
def create(data: Data): Future[Valid[Person]] =
(processor ? Message(PersonApi.Create(data))).mapTo[Valid[Person]]
def update(person: Person, data: Data) =
(processor ? Message(PersonApi.Update(person.id, data))).mapTo[Valid[Person]]
}
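// Hedged usage sketch (added; the variable names and Data contents are illustrative, not from
// this file):
//
//   val api = new PersonApi(coll, processor)                 // processor runs PersonProcessor
//   val created: Future[Valid[Person]] = api.create(someData)
//
// Reads (`all`, `byId`) hit the read-only collection directly, while writes are serialized
// through the eventsourced `processor` actor via the ask pattern above.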
private[api] object PersonApi {
import play.api.libs.json._
sealed trait WithJs {
def js: String
def data = read(js)
}
case class Create(js: String) extends WithJs
def Create(data: Data) = new Create(write(data))
case class Update(id: Int, js: String) extends WithJs
def Update(id: Int, data: Data) = new Update(id, write(data))
private def write(data: Data): String = Json stringify (Json.writes[Data] writes data)
private def read(js: String): Valid[Data] = jsValid(Json.reads[Data] reads (Json parse js))
}
// -------------------------------------------------------------------------------------------------------------
// PersonProcessor is the single writer to coll, so reads and writes can run in separate transactions
// -------------------------------------------------------------------------------------------------------------
class PersonProcessor(coll: Coll[Person]) extends Actor { this: Emitter ⇒
def receive = {
case create: PersonApi.Create ⇒ sender ! create.data.flatMap(_.apply map coll.insert)
case update: PersonApi.Update ⇒ sender ! update.data.flatMap(_.apply map (f ⇒ coll.update(f(update.id))))
}
}
| ornicar/nestor | app/api/PersonApi.scala | Scala | mit | 2,241 |
package com.scalaAsm.x86
package Instructions
package General
// Description: Bit Test and Complement
// Category: general/bit
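// Semantics note (added for clarity, per the x86 reference): BTC copies the selected bit of
// the destination operand into CF and then complements that bit in place,
// e.g. `btc eax, 3` toggles bit 3 of EAX.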
trait BTC extends InstructionDefinition {
val mnemonic = "BTC"
}
object BTC extends TwoOperands[BTC] with BTCImpl
trait BTCImpl extends BTC {
implicit object _0 extends TwoOp[rm16, imm8] {
val opcode: TwoOpcodes = (0x0F, 0xBA) /+ 7
val format = RmImmFormat
}
implicit object _1 extends TwoOp[rm32, imm8] {
val opcode: TwoOpcodes = (0x0F, 0xBA) /+ 7
val format = RmImmFormat
}
implicit object _2 extends TwoOp[rm64, imm8] {
val opcode: TwoOpcodes = (0x0F, 0xBA) /+ 7
override def prefix = REX.W(true)
val format = RmImmFormat
}
implicit object _3 extends TwoOp[rm16, r16] {
val opcode: TwoOpcodes = (0x0F, 0xBB) /r
val format = MemRegFormat
}
implicit object _4 extends TwoOp[rm32, r32] {
val opcode: TwoOpcodes = (0x0F, 0xBB) /r
val format = MemRegFormat
}
implicit object _5 extends TwoOp[rm64, r64] {
val opcode: TwoOpcodes = (0x0F, 0xBB) /r
override def prefix = REX.W(true)
val format = MemRegFormat
}
}
| bdwashbu/scala-x86-inst | src/main/scala/com/scalaAsm/x86/Instructions/General/BTC.scala | Scala | apache-2.0 | 1,126 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.thriftserver
import java.security.PrivilegedExceptionAction
import java.util.{Arrays, Map => JMap}
import java.util.concurrent.{Executors, RejectedExecutionException, TimeUnit}
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.util.control.NonFatal
import org.apache.hadoop.hive.metastore.api.FieldSchema
import org.apache.hadoop.hive.shims.Utils
import org.apache.hive.service.cli._
import org.apache.hive.service.cli.operation.ExecuteStatementOperation
import org.apache.hive.service.cli.session.HiveSession
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{DataFrame, Row => SparkRow, SQLContext}
import org.apache.spark.sql.execution.HiveResult.{getTimeFormatters, toHiveString, TimeFormatters}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.VariableSubstitution
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.CalendarInterval
import org.apache.spark.util.{Utils => SparkUtils}
private[hive] class SparkExecuteStatementOperation(
val sqlContext: SQLContext,
parentSession: HiveSession,
statement: String,
confOverlay: JMap[String, String],
runInBackground: Boolean = true,
queryTimeout: Long)
extends ExecuteStatementOperation(parentSession, statement, confOverlay, runInBackground)
with SparkOperation
with Logging {
// If a timeout value `queryTimeout` is specified by users and it is smaller than
// a global timeout value, we use the user-specified value.
// This code follows the Hive timeout behaviour (See #29933 for details).
private val timeout = {
val globalTimeout = sqlContext.conf.getConf(SQLConf.THRIFTSERVER_QUERY_TIMEOUT)
if (globalTimeout > 0 && (queryTimeout <= 0 || globalTimeout < queryTimeout)) {
globalTimeout
} else {
queryTimeout
}
}
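  // Examples of the resulting value (derived from the rule above): with a global timeout of
  // 60s, queryTimeout = 0 -> 60s, queryTimeout = 30 -> 30s, queryTimeout = 90 -> 60s; with no
  // global timeout configured (<= 0), the user-specified queryTimeout is used as-is.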
private val forceCancel = sqlContext.conf.getConf(SQLConf.THRIFTSERVER_FORCE_CANCEL)
private val substitutorStatement = SQLConf.withExistingConf(sqlContext.conf) {
new VariableSubstitution().substitute(statement)
}
private var result: DataFrame = _
private var iter: FetchIterator[SparkRow] = _
private var dataTypes: Array[DataType] = _
private lazy val resultSchema: TableSchema = {
if (result == null || result.schema.isEmpty) {
new TableSchema(Arrays.asList(new FieldSchema("Result", "string", "")))
} else {
logInfo(s"Result Schema: ${result.schema}")
SparkExecuteStatementOperation.getTableSchema(result.schema)
}
}
def addNonNullColumnValue(
from: SparkRow,
to: ArrayBuffer[Any],
ordinal: Int,
timeFormatters: TimeFormatters): Unit = {
dataTypes(ordinal) match {
case StringType =>
to += from.getString(ordinal)
case IntegerType =>
to += from.getInt(ordinal)
case BooleanType =>
to += from.getBoolean(ordinal)
case DoubleType =>
to += from.getDouble(ordinal)
case FloatType =>
to += from.getFloat(ordinal)
case DecimalType() =>
to += from.getDecimal(ordinal)
case LongType =>
to += from.getLong(ordinal)
case ByteType =>
to += from.getByte(ordinal)
case ShortType =>
to += from.getShort(ordinal)
case BinaryType =>
to += from.getAs[Array[Byte]](ordinal)
// SPARK-31859, SPARK-31861: Date and Timestamp need to be turned to String here to:
// - respect spark.sql.session.timeZone
// - work with spark.sql.datetime.java8API.enabled
// These types have always been sent over the wire as string, converted later.
case _: DateType | _: TimestampType =>
to += toHiveString((from.get(ordinal), dataTypes(ordinal)), false, timeFormatters)
case CalendarIntervalType =>
to += toHiveString(
(from.getAs[CalendarInterval](ordinal), CalendarIntervalType),
false,
timeFormatters)
case _: ArrayType | _: StructType | _: MapType | _: UserDefinedType[_] |
_: YearMonthIntervalType | _: DayTimeIntervalType | _: TimestampNTZType =>
to += toHiveString((from.get(ordinal), dataTypes(ordinal)), false, timeFormatters)
}
}
def getNextRowSet(order: FetchOrientation, maxRowsL: Long): RowSet = withLocalProperties {
try {
sqlContext.sparkContext.setJobGroup(statementId, substitutorStatement, forceCancel)
getNextRowSetInternal(order, maxRowsL)
} finally {
sqlContext.sparkContext.clearJobGroup()
}
}
private def getNextRowSetInternal(
order: FetchOrientation,
maxRowsL: Long): RowSet = withLocalProperties {
log.info(s"Received getNextRowSet request order=${order} and maxRowsL=${maxRowsL} " +
s"with ${statementId}")
validateDefaultFetchOrientation(order)
assertState(OperationState.FINISHED)
setHasResultSet(true)
val resultRowSet: RowSet = RowSetFactory.create(getResultSetSchema, getProtocolVersion, false)
if (order.equals(FetchOrientation.FETCH_FIRST)) {
iter.fetchAbsolute(0)
} else if (order.equals(FetchOrientation.FETCH_PRIOR)) {
iter.fetchPrior(maxRowsL)
} else {
iter.fetchNext()
}
resultRowSet.setStartOffset(iter.getPosition)
if (!iter.hasNext) {
resultRowSet
} else {
val timeFormatters = getTimeFormatters
// maxRowsL here typically maps to java.sql.Statement.getFetchSize, which is an int
val maxRows = maxRowsL.toInt
var curRow = 0
while (curRow < maxRows && iter.hasNext) {
val sparkRow = iter.next()
val row = ArrayBuffer[Any]()
var curCol = 0
while (curCol < sparkRow.length) {
if (sparkRow.isNullAt(curCol)) {
row += null
} else {
addNonNullColumnValue(sparkRow, row, curCol, timeFormatters)
}
curCol += 1
}
resultRowSet.addRow(row.toArray.asInstanceOf[Array[Object]])
curRow += 1
}
log.info(s"Returning result set with ${curRow} rows from offsets " +
s"[${iter.getFetchStart}, ${iter.getPosition}) with $statementId")
resultRowSet
}
}
def getResultSetSchema: TableSchema = resultSchema
override def runInternal(): Unit = {
setState(OperationState.PENDING)
logInfo(s"Submitting query '$statement' with $statementId")
HiveThriftServer2.eventManager.onStatementStart(
statementId,
parentSession.getSessionHandle.getSessionId.toString,
statement,
statementId,
parentSession.getUsername)
setHasResultSet(true) // avoid no resultset for async run
if (timeout > 0) {
val timeoutExecutor = Executors.newSingleThreadScheduledExecutor()
timeoutExecutor.schedule(new Runnable {
override def run(): Unit = {
try {
timeoutCancel()
} catch {
case NonFatal(e) =>
setOperationException(new HiveSQLException(e))
logError(s"Error cancelling the query after timeout: $timeout seconds")
} finally {
timeoutExecutor.shutdown()
}
}
}, timeout, TimeUnit.SECONDS)
}
if (!runInBackground) {
execute()
} else {
val sparkServiceUGI = Utils.getUGI()
// Runnable impl to call runInternal asynchronously,
// from a different thread
val backgroundOperation = new Runnable() {
override def run(): Unit = {
val doAsAction = new PrivilegedExceptionAction[Unit]() {
override def run(): Unit = {
registerCurrentOperationLog()
try {
withLocalProperties {
execute()
}
} catch {
case e: HiveSQLException => setOperationException(e)
}
}
}
try {
sparkServiceUGI.doAs(doAsAction)
} catch {
case e: Exception =>
setOperationException(new HiveSQLException(e))
logError("Error running hive query as user : " +
sparkServiceUGI.getShortUserName(), e)
}
}
}
try {
// This submit blocks if no background threads are available to run this operation
val backgroundHandle =
parentSession.getSessionManager().submitBackgroundOperation(backgroundOperation)
setBackgroundHandle(backgroundHandle)
} catch {
case rejected: RejectedExecutionException =>
logError("Error submitting query in background, query rejected", rejected)
setState(OperationState.ERROR)
HiveThriftServer2.eventManager.onStatementError(
statementId, rejected.getMessage, SparkUtils.exceptionString(rejected))
throw HiveThriftServerErrors.taskExecutionRejectedError(rejected)
case NonFatal(e) =>
logError(s"Error executing query in background", e)
setState(OperationState.ERROR)
HiveThriftServer2.eventManager.onStatementError(
statementId, e.getMessage, SparkUtils.exceptionString(e))
throw new HiveSQLException(e)
}
}
}
private def execute(): Unit = {
try {
synchronized {
if (getStatus.getState.isTerminal) {
logInfo(s"Query with $statementId in terminal state before it started running")
return
} else {
logInfo(s"Running query with $statementId")
setState(OperationState.RUNNING)
}
}
// Always use the latest class loader provided by executionHive's state.
val executionHiveClassLoader = sqlContext.sharedState.jarClassLoader
Thread.currentThread().setContextClassLoader(executionHiveClassLoader)
// Always set the session state classloader to `executionHiveClassLoader` even for sync mode
if (!runInBackground) {
parentSession.getSessionState.getConf.setClassLoader(executionHiveClassLoader)
}
sqlContext.sparkContext.setJobGroup(statementId, substitutorStatement, forceCancel)
result = sqlContext.sql(statement)
logDebug(result.queryExecution.toString())
HiveThriftServer2.eventManager.onStatementParsed(statementId,
result.queryExecution.toString())
iter = if (sqlContext.getConf(SQLConf.THRIFTSERVER_INCREMENTAL_COLLECT.key).toBoolean) {
new IterableFetchIterator[SparkRow](new Iterable[SparkRow] {
override def iterator: Iterator[SparkRow] = result.toLocalIterator.asScala
})
} else {
new ArrayFetchIterator[SparkRow](result.collect())
}
dataTypes = result.schema.fields.map(_.dataType)
} catch {
// Actually do need to catch Throwable as some failures don't inherit from Exception and
// HiveServer will silently swallow them.
case e: Throwable =>
        // When cancel() or close() is called very quickly after the query is started,
        // they may both call cleanup() before any Spark jobs are started. But before the
        // background task is interrupted, it may already have started some Spark jobs, so we
        // need to cancel again to make sure the jobs are cancelled once the background thread
        // is interrupted.
if (statementId != null) {
sqlContext.sparkContext.cancelJobGroup(statementId)
}
val currentState = getStatus().getState()
if (currentState.isTerminal) {
// This may happen if the execution was cancelled, and then closed from another thread.
logWarning(s"Ignore exception in terminal state with $statementId: $e")
} else {
logError(s"Error executing query with $statementId, currentState $currentState, ", e)
setState(OperationState.ERROR)
HiveThriftServer2.eventManager.onStatementError(
statementId, e.getMessage, SparkUtils.exceptionString(e))
e match {
case _: HiveSQLException => throw e
case _ => throw HiveThriftServerErrors.runningQueryError(e)
}
}
} finally {
synchronized {
if (!getStatus.getState.isTerminal) {
setState(OperationState.FINISHED)
HiveThriftServer2.eventManager.onStatementFinish(statementId)
}
}
sqlContext.sparkContext.clearJobGroup()
}
}
def timeoutCancel(): Unit = {
synchronized {
if (!getStatus.getState.isTerminal) {
logInfo(s"Query with $statementId timed out after $timeout seconds")
setState(OperationState.TIMEDOUT)
cleanup()
HiveThriftServer2.eventManager.onStatementTimeout(statementId)
}
}
}
override def cancel(): Unit = {
synchronized {
if (!getStatus.getState.isTerminal) {
logInfo(s"Cancel query with $statementId")
setState(OperationState.CANCELED)
cleanup()
HiveThriftServer2.eventManager.onStatementCanceled(statementId)
}
}
}
override protected def cleanup(): Unit = {
if (runInBackground) {
val backgroundHandle = getBackgroundHandle()
if (backgroundHandle != null) {
backgroundHandle.cancel(true)
}
}
// RDDs will be cleaned automatically upon garbage collection.
if (statementId != null) {
sqlContext.sparkContext.cancelJobGroup(statementId)
}
}
}
object SparkExecuteStatementOperation {
def getTableSchema(structType: StructType): TableSchema = {
val schema = structType.map { field =>
val attrTypeString = field.dataType match {
case CalendarIntervalType => StringType.catalogString
case _: YearMonthIntervalType => "interval_year_month"
case _: DayTimeIntervalType => "interval_day_time"
case _: TimestampNTZType => "timestamp"
case other => other.catalogString
}
new FieldSchema(field.name, attrTypeString, field.getComment.getOrElse(""))
}
new TableSchema(schema.asJava)
}
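  // Mapping examples (as implemented above): CalendarIntervalType is reported to clients as
  // "string", year-month / day-time intervals as "interval_year_month" / "interval_day_time",
  // TimestampNTZType as "timestamp", and everything else via its catalogString
  // (e.g. IntegerType -> "int").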
}
| jiangxb1987/spark | sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkExecuteStatementOperation.scala | Scala | apache-2.0 | 14,735 |
/******************************************************************************
Copyright (c) 2013-2014, KAIST.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.widl
import scala.collection.mutable.{HashMap => MHashMap, HashSet => MHashSet, ListBuffer}
import kr.ac.kaist.jsaf.analysis.cfg._
import kr.ac.kaist.jsaf.analysis.cfg.{Node => CNode, InternalError => IError}
import kr.ac.kaist.jsaf.analysis.typing._
import kr.ac.kaist.jsaf.analysis.typing.{SemanticsExpr => SE}
import kr.ac.kaist.jsaf.analysis.typing.models._
import kr.ac.kaist.jsaf.analysis.typing.models.builtin.{BuiltinArray, BuiltinDate}
import kr.ac.kaist.jsaf.analysis.typing.domain.{BoolFalse => F, BoolTrue => T, _}
import kr.ac.kaist.jsaf.analysis.typing.domain.Heap
import kr.ac.kaist.jsaf.analysis.typing.domain.Context
import kr.ac.kaist.jsaf.nodes._
import kr.ac.kaist.jsaf.scala_src.nodes._
import java.util.{List => JList}
import kr.ac.kaist.jsaf.nodes_util.IRFactory
import kr.ac.kaist.jsaf.scala_src.useful.Lists._
import scala.Some
import kr.ac.kaist.jsaf.analysis.typing.AddressManager._
class WIDLModel(cfg: CFG) extends Model(cfg) {
val verbose = false
////////////////////////////////////////////////////////////////////////////////
// Model Maps
////////////////////////////////////////////////////////////////////////////////
private var map_fid = Map[FunctionId, String]()
private var map_semantic = Map[String, SemanticFun]()
private val map_presemantic = Map[String, SemanticFun]()
private val map_def = Map[String, AccessFun]()
private val map_use = Map[String, AccessFun]()
// WType to Location map
private var type2locMap = new MHashMap[String, Loc]
private var functionType2locMap = new MHashMap[String, Loc]
private var WIDLDateMockupLoc: Loc = 0
def getType(typ: WType): Option[String] = WIDLTypeMap.getType(typ)
////////////////////////////////////////////////////////////////////////////////
// Initialization List
////////////////////////////////////////////////////////////////////////////////
type InitList = ListBuffer[LocPropMap]
val initList = new InitList
def applyInitList(heap: Heap): Heap = {
var newHeap = heap
for(locProps <- initList) {
val (loc, props) = (locProps._1, locProps._2)
/* List[(String, PropValue, Option[(Loc, Obj)], Option[FunctionId] */
val prepareList = props.map(x => prepareForUpdate("WIDL", x._1, x._2))
for(prepare <- prepareList) {
val (name, propValue, obj, func) = prepare
/* added function object to heap if any */
obj match {
case Some((loc, obj)) => newHeap = Heap(newHeap.map.updated(loc, obj))
case None =>
}
/* update api function map */
func match {
case Some((fid, name)) => map_fid = map_fid + (fid -> name)
case None => Unit
}
}
/* api object */
val obj = newHeap.map.get(loc) match {
case Some(old) => prepareList.foldLeft(old)((o, prepare) => o.update(prepare._1, prepare._2))
case None => prepareList.foldLeft(Obj.empty)((o, prepare) => o.update(prepare._1, prepare._2))
}
/* added api object to heap */
newHeap = Heap(newHeap.map.updated(loc, obj))
}
initList.clear()
newHeap
}
////////////////////////////////////////////////////////////////////////////////
// Making Mockup value for WIDL Type
////////////////////////////////////////////////////////////////////////////////
// WARNING:
  //   The following functions are used both before and during the analysis!
  //   DO NOT create new locations when refactoring these functions.
  //   Doing so can BREAK the analysis.
def WType2Mockup(typ: WType): Value = typ match {
case SWAnyType(info, suffix) => Value(PValueTop)
case SWNamedType(info, suffix, name2) => name2 match {
case "any" => Value(PValueTop)
case "void" => Value(UndefTop)
case "boolean" => Value(BoolTop)
case "byte" | "octet" | "short" | "unsigned short" |
"long" | "unsigned long" | "long long" | "unsigned long long" |
"float" | "unrestricted float" | "double" | "unrestricted double" =>
Value(NumTop)
case "DOMString" => Value(StrTop)
case "Date" => Value(WIDLDateMockupLoc)
case _enum_ if WIDLTypeMap.enumMap.contains(_enum_) => {
// enum
val enumNode = WIDLTypeMap.enumMap(_enum_)
var absStr: AbsString = StrBot
val i = enumNode.getEnumValueList.iterator()
while(i.hasNext) absStr+= AbsString.alpha(i.next().getStr)
Value(absStr)
}
case _typedef_ if WIDLTypeMap.typedefMap.contains(_typedef_) => {
// typedef
WType2Mockup(WIDLTypeMap.typedefMap(_typedef_).getTyp)
}
case _interface_ if WIDLTypeMap.interfaceMap.contains(_interface_) => {
Value(type2locMap(_interface_))
}
case _dictionary_ if WIDLTypeMap.dictionaryMap.contains(_dictionary_) => {
// dictionary
Value(type2locMap(_dictionary_))
}
case _callback_ if WIDLTypeMap.callbackMap.contains(_callback_) => {
// callback
Value(functionType2locMap(_callback_))
}
case _ => {
Value(PValueTop)
}
}
case SWArrayType(info, suffix, type2) => getType(type2) match {
case Some(type3) => type2locMap.get(type3) match {
case Some(l) => Value(l)
case _ => Value(PValueTop)
}
case _ => Value(PValueTop)
}
case SWSequenceType(info, suffix, type2) => getType(type2) match {
case Some(type3) => type2locMap.get(type3) match {
case Some(l) => Value(l)
case _ => Value(PValueTop)
}
case _ => Value(PValueTop)
}
case SWUnionType(info, suffix, types) =>
types.foldLeft(ValueBot)((value, typ) => value + WType2Mockup(typ))
}
////////////////////////////////////////////////////////////////////////////////
// Helper
////////////////////////////////////////////////////////////////////////////////
def getNewLoc(h: Heap, ctx: Context, cfg: CFG, cp: ControlPoint, alloc: () => Int): (Heap, Context, Loc) = {
val lset_env = h(SinglePureLocalLoc)("@env")._2._2
val set_addr = lset_env.foldLeft[Set[Address]](Set())((a, l) => a + locToAddr(l))
if (set_addr.size > 1) throw new IError("API heap allocation: Size of env address is " + set_addr.size)
val addr_env = (cp._1._1, set_addr.head)
val addr = cfg.getAPIAddress(addr_env, alloc())
val l = addrToLoc(addr, Recent)
val (h_1, ctx_1) = Helper.Oldify(h, ctx, addr)
(h_1, ctx_1, l)
}
def mkDateObj: Obj = {
Obj.empty.
update("@class", PropValue(AbsString.alpha("Date"))).
update("@proto", PropValue(ObjectValue(Value(BuiltinDate.ProtoLoc), F, F, F))).
update("@extensible", PropValue(T)).
update("@primitive", PropValue(Value(NumTop)))
}
def mkInterfaceObj(interf: WInterface): Obj = {
val proto: Loc = interf.getParent.isSome match {
case true => {
val parentName = interf.getParent.unwrap().getName
type2locMap.get(parentName) match {
case Some(parentLoc) => parentLoc
case _ => ObjProtoLoc // is it possible?
}
}
case false => ObjProtoLoc
}
val initObj: Obj = Helper.NewObject(proto)
toList(interf.getMembers).foldLeft(initObj)((obj, mem) => mem match {
case SWConst(_, _, t, n, v) => {
val constValue = PropValue(ObjectValue(WIDLHelper.WLiteral2Value(v), F, T, F))
obj.update(n, constValue)
}
case attribute@SWAttribute(_, attrs, t, n, _) => {
val isUnforgeable = WIDLHelper.isUnforgeable(attribute)
val isWritable = AbsBool.alpha(!WIDLHelper.isReadOnly(mem.asInstanceOf[WMember]))
val isConfigurable = AbsBool.alpha(!isUnforgeable)
obj.update(n, PropValue(ObjectValue(WType2Mockup(t), isWritable, T, isConfigurable)))
/*
// If the attribute was declared with the [Unforgeable] extended attribute,
// then the property exists on every object that implements the interface.
// Otherwise, it exists on the interface’s interface prototype object.
if(isUnforgeable) {
// Implements case
obj.update(n, PropValue(ObjectValue(WType2Mockup(t), isWritable, T, isConfigurable)))
} else {
// Assumption: prototype object has a property named n typed t
obj.update(n, PropValue(ObjectValue(WType2Mockup(t), isWritable, T, isConfigurable)))
}
*/
}
case SWOperation(_, _, _, returnType, n, args, _) => n match {
case Some(name) => {
// TODO: consider static function
obj.update(name, PropValue(ObjectValue(Value(functionType2locMap(interf.getName+"."+name)), T, T, T)))
}
case None => obj
}
case _ => obj
})
}
def mkDictionaryObj(dic: WDictionary): Obj = {
val initObj: Obj = Helper.NewObject(ObjProtoLoc)
toList(dic.getMembers).foldLeft(initObj)((obj, mem) => {
obj.update(mem.getName, PropValue(ObjectValue(WType2Mockup(mem.getTyp), T, T, T))).absentTop(mem.getName)
})
}
def mkArrayObj(typ: WType): Obj = {
Helper.NewArrayObject(UInt).
update(NumStr, PropValue(ObjectValue(WType2Mockup(typ), T, T, T)))
}
def mkFunctionProps(name: String, argSize: Int): PropMap = {
val props = new PropMap
props.put("@class", AbsConstValue(PropValue(AbsString.alpha("Function"))))
props.put("@proto", AbsConstValue(PropValue(ObjectValue(Value(FunctionProtoLoc), F, F, F))))
props.put("@extensible", AbsConstValue(PropValue(T)))
props.put("@scope", AbsConstValue(PropValue(Value(NullTop))))
props.put("@function", AbsInternalFunc(name))
props.put("@construct", AbsInternalFunc(name + ".constructor"))
props.put("@hasinstance", AbsConstValue(PropValue(Value(NullTop))))
props.put("length", AbsConstValue(PropValue(ObjectValue(Value(AbsNumber.alpha(argSize)), F, F, F))))
props
}
def mkInterfOpProps(interf: String, op: WOperation): PropMap = op.getName.isSome match {
case true => mkFunctionProps(interf+"."+op.getName.unwrap(), op.getArgs.size) //TODO: consider static function
case false => { // unreachable for Samsung API
new PropMap
}
}
def mkCallbackProps(callback: WCallback): PropMap = {
mkFunctionProps(callback.getName, callback.getArgs.size) //TODO: consider optional arguments
}
////////////////////////////////////////////////////////////////////////////////
// Semantic Function
////////////////////////////////////////////////////////////////////////////////
val returnTypeMap = new MHashMap[String, WType]
def successorProvider(): () => Int = {
var idx: Int = 0
() => {
idx = idx + 1
idx
}
}
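  // Example (derived from the definition above): each provider counts independently.
  //   val next = successorProvider()
  //   next()  // 1
  //   next()  // 2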
def semantic(sem: Semantics, _heap: Heap, _context: Context, heapExc: Heap, contextExc: Context, cp: ControlPoint, cfg: CFG, funcName: String, args: CFGExpr): ((Heap, Context),(Heap, Context)) = {
var heap: Heap = _heap
var context: Context = _context
// Constructor call
if(funcName.endsWith(".constructor")) {
var isCorrectArgument = true
WIDLTypeMap.constructorMap.get(funcName.substring(0, funcName.length - 12)) match {
case Some(constructorList) =>
// Collect constructor argument sizes
val constructorArgSizeSet = new MHashSet[(Int, Int)]
for(constructor <- constructorList) {
val args = constructor.getArgs
val argMinSize = args.size - WIDLHelper.getOptionalParameterCount(args)
val argMaxSize = args.size
constructorArgSizeSet.add((argMinSize, argMaxSize))
}
// Check the argument size for each argument location
val argLocSet = SemanticsExpr.V(args, heap, context)._1.locset
for(argLoc <- argLocSet) {
if(isCorrectArgument) {
val absnum = heap(argLoc)("length").objval.value.pvalue.numval
absnum.getSingle match {
case Some(n) if AbsNumber.isNum(absnum) =>
isCorrectArgument = constructorArgSizeSet.exists(size => n >= size._1 && n <= size._2)
case _ =>
}
}
}
case None => isCorrectArgument = false
}
if(!isCorrectArgument) {
// Throw TypeError
val (newHeapExc, newContextExc) = Helper.RaiseException(heap, context, Set[Exception](TypeError))
return ((HeapBot, ContextBot), (heapExc + newHeapExc, contextExc + newContextExc))
}
} else { // Assume that the function is always form of "[Interface].prototype.[Function]"
val splitByDot: Array[String] = funcName.split('.')
val interface: String = splitByDot(0)
val func: String = splitByDot(splitByDot.size-1)
val alloc: () => Int = successorProvider()
/* TODO: overload resolution
val overloadSet: List[WInterfaceMember] =
WIDLTypeMap.getMembers(interface).filter(p => p._2._1.equals(func)).map(p => p._2._2)
*/
val current: Option[(String, WInterfaceMember)] = WIDLTypeMap.getMembers(interface).find(p => p._2._1.equals(func)) match {
case Some(pair) => Some(pair._2)
case None => None
}
val params: JList[WArgument] = current match { // Now, we get the arguments of the API
case Some((_, op: WOperation)) => op.getArgs
case _ => null
}
if (params != null) { // We want to invoke callback functions that we get from arguments
val dummyInfo = IRFactory.makeInfo(IRFactory.dummySpan("Model"))
var containsCallback = false
// 1. collect callback arguments & make arguments for each callbacks
var loc_arg_set: Set[(Loc, Loc)] = Set()
def aux(warg: JList[WArgument], callbackV: Value) = {
// 1.1. get the actual callback function that we want to invoke from cfg
val lset_f = callbackV._2.filter((l) => (T <= Helper.IsCallable(heap, l))) // location set
// 1.2. make an argument object
var argObj: Obj = Helper.NewArgObject(AbsNumber.alpha(params.size()))
for (j <- 0 until warg.size) warg.get(j) match {
case SWArgument(_, attrs, t, _, _) => {
val v_j = WType2Mockup(t)
val pv: PropValue = PropValue(ObjectValue(v_j, T, T, T))
argObj = argObj.update(j.toString, pv)
}
}
argObj = argObj.update("callee", PropValue(ObjectValue(Value(lset_f), T, F, T)))
val nl = getNewLoc(heap, context, cfg, cp, alloc)
heap = nl._1
context = nl._2
heap = heap.update(nl._3, argObj)
// 1.3. collect the callback functions with the arguments
lset_f.foreach((l) => loc_arg_set += ((l, nl._3)))
}
        for (i <- 0 until params.size()) params.get(i) match { // for each [param]eter in the WIDL specification
case SWArgument(_, attrs, t, _, _) => {
t match {
case SWNamedType(_, _, typ) =>
WIDLTypeMap.callbackMap.get(typ) match {
case Some(callback) => { // now we get the parameter which is a callback function
containsCallback = true
//System.out.format("now... %s\n", typ) // DEBUG
val callbackObj = SE.V(CFGLoad(dummyInfo, args, CFGString(i.toString)), heap, context)._1
WIDLTypeMap.interfaceMap.get(typ) match {
case Some(interf) => // not FunctionOnly
val mems: JList[WInterfaceMember] = interf.getMembers
callbackObj._2.foreach(l => {
try {
for (j <- 0 until mems.size()) {
val op: WOperation = mems.get(j).asInstanceOf[WOperation]
if (!(heap(l)(op.getName.unwrap()) </ PropValueBot)) throw new UnknownError()
}
for (j <- 0 until mems.size()) {
val op: WOperation = mems.get(j).asInstanceOf[WOperation]
val callbackPropV = heap(l)(op.getName.unwrap())
aux(op.getArgs, callbackPropV._1._1)
}
} catch {
case _: Throwable =>
}
})
case _ => // FunctionOnly
aux(callback.getArgs, callbackObj)
}
}
case _ =>
}
case _ =>
}
}
}
// 2. add call edges to cfg for invoking callback function
if (containsCallback) {
// 2.1. set this value
val l_this = GlobalLoc
val v_this = Value(LocSet(l_this))
//val (callee_this, h_temp, ctx_temp, es) = Helper.toObject(current_heap, current_context, v_this, )
// 2.2. call the functions(1) with the argument object(2)
val nl_2 = getNewLoc(heap, context, cfg, cp, alloc)
heap = nl_2._1
context = nl_2._2
val l_r = nl_2._3
val o_old = heap(SinglePureLocalLoc)
val cc_caller = cp._2
val n_aftercall = cfg.getAftercallFromCall(cp._1)
val cp_aftercall = (n_aftercall, cc_caller)
val n_aftercatch = cfg.getAftercatchFromCall(cp._1)
val cp_aftercatch = (n_aftercatch, cc_caller)
loc_arg_set.foreach(pair => {
val l_f = pair._1
val l_arg = pair._2
val o_f = heap(l_f)
o_f("@function")._3.foreach((fid) => {
cc_caller.NewCallContext(heap, cfg, fid, l_r, v_this._2).foreach((pair) => {
val (cc_new, o_new) = pair
val o_new2 = o_new.
update(cfg.getArgumentsName(fid),
PropValue(ObjectValue(Value(LocSet(l_arg)), T, F, F))).
update("@scope", o_f("@scope"))
sem.addCallEdge(cp, ((fid,LEntry), cc_new), ContextEmpty, o_new2)
sem.addReturnEdge(((fid,LExit), cc_new), cp_aftercall, context, o_old)
sem.addReturnEdge(((fid, LExitExc), cc_new), cp_aftercatch, context, o_old)
})
})
})
// TODO: exceptional heap & context?
/*val s_1 = (heapExc + h_e, contextExc + ctx_e)
((h_5, ctx_4), s_1)*/
}
}
}
// Function call
returnTypeMap.get(funcName) match {
case Some(returnType) =>
initList.clear()
return ((Helper.ReturnStore(applyInitList(heap), WType2Mockup(returnType)), context), (heapExc, contextExc))
case None =>
}
((heap, context), (heapExc, contextExc))
}
////////////////////////////////////////////////////////////////////////////////
// New Object
////////////////////////////////////////////////////////////////////////////////
type LocPropMap = (Loc, PropMap)
type PropMap = MHashMap[String, AbsProperty]
val globalProps = new PropMap
val newLocPropsMap = new MHashMap[String, LocPropMap]
def newObjectLocProps(locName: String, protoLoc: Loc = ObjProtoLoc): LocPropMap = {
val loc = newSystemLoc(locName, Recent)
val props = new PropMap
props.put("@class", AbsConstValue(PropValue(AbsString.alpha("Object"))))
props.put("@proto", AbsConstValue(PropValue(ObjectValue(Value(protoLoc), F, F, F))))
props.put("@extensible", AbsConstValue(PropValue(T)))
val locProps: LocPropMap = (loc, props)
initList.append(locProps)
newLocPropsMap.put(locName, locProps)
locProps
}
def newFunctionLocProps(locName: String, argSize: Int): (LocPropMap, LocPropMap) = {
val protoLocProps = newObjectLocProps(locName + ".prototype")
val loc = newSystemLoc(locName, Recent)
val props = new PropMap
props.put("@class", AbsConstValue(PropValue(AbsString.alpha("Function"))))
props.put("@proto", AbsConstValue(PropValue(ObjectValue(Value(FunctionProtoLoc), F, F, F))))
props.put("@extensible", AbsConstValue(PropValue(T)))
props.put("@scope", AbsConstValue(PropValueNullTop))
props.put("@function", AbsInternalFunc(locName))
props.put("@construct", AbsInternalFunc(locName + ".constructor"))
props.put("@hasinstance", AbsConstValue(PropValueNullTop))
props.put("prototype", AbsConstValue(PropValue(ObjectValue(Value(protoLocProps._1), F, F, F))))
props.put("length", AbsConstValue(PropValue(ObjectValue(Value(AbsNumber.alpha(argSize)), F, F, F))))
val locProps: LocPropMap = (loc, props)
initList.append(locProps)
newLocPropsMap.put(locName, locProps)
(locProps, protoLocProps)
}
def newArrayLocProps(locName: String, defaultNumber: Value): LocPropMap = {
val loc = newSystemLoc(locName, Recent)
val props = new PropMap
props.put("@class", AbsConstValue(PropValue(AbsString.alpha("Array"))))
props.put("@proto", AbsConstValue(PropValue(ObjectValue(Value(BuiltinArray.ProtoLoc), F, F, F))))
props.put("@extensible", AbsConstValue(PropValue(T)))
props.put(Str_default_number, AbsConstValue(PropValue(ObjectValue(defaultNumber, T, T, T))))
props.put("length", AbsConstValue(PropValue(ObjectValue(Value(UInt), T, F, F))))
val locProps: LocPropMap = (loc, props)
initList.append(locProps)
newLocPropsMap.put(locName, locProps)
locProps
}
def newDateLocProps(locName: String): LocPropMap = {
val loc = newSystemLoc(locName, Recent)
val props = new PropMap
props.put("@class", AbsConstValue(PropValue(AbsString.alpha("Date"))))
props.put("@proto", AbsConstValue(PropValue(ObjectValue(Value(BuiltinDate.ProtoLoc), F, F, F))))
props.put("@extensible", AbsConstValue(PropValue(T)))
props.put("@primitive", AbsConstValue(PropValueNumTop))
val locProps: LocPropMap = (loc, props)
initList.append(locProps)
newLocPropsMap.put(locName, locProps)
locProps
}
////////////////////////////////////////////////////////////////////////////////
// Create Web IDL Interface
////////////////////////////////////////////////////////////////////////////////
val interfaceNameIndexMap = new MHashMap[String, Int]
val createdInterfacesMap = new MHashMap[(Loc, String), (LocPropMap, LocPropMap)]
def getNextInterfaceName(name: String): String = {
val i = interfaceNameIndexMap.getOrElseUpdate(name, -1) + 1
if(i >= 0) interfaceNameIndexMap.put(name, i)
name + i
}
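  // Example (derived from the definition above): the first call for a given name yields index 0
  // and later calls increment it, e.g. getNextInterfaceName("Foo") returns "Foo0", then "Foo1".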
def createInterfaceFromName(targetLoc: Loc, interfaceName: String): (LocPropMap, LocPropMap) = {
// Find interface node
WIDLTypeMap.interfaceMap.get(interfaceName) match {
case Some(interfaceNode) => createInterfaceFromNode(targetLoc, interfaceNode)
case None =>
if (verbose)
System.out.println("* \"" + interfaceName + "\" is not an interface.")
(null, null)
}
}
def createInterfaceFromNode(targetLoc: Loc, interfaceNode: WInterface): (LocPropMap, LocPropMap) = {
// Interface name
val interfaceName = interfaceNode.getName
// If this interface is already created then return it
createdInterfacesMap.get((targetLoc, interfaceName)) match {
case Some((locProps, protoLocProps)) => return (locProps, protoLocProps)
case None =>
}
// Get the object for this interface
val (locProps, protoLocProps) = if(targetLoc == -1) {
// Create Object or Function Object
if(!WIDLHelper.isCallback(interfaceNode)) newFunctionLocProps(interfaceName, /* Constructor argument size */ 0)
else (newObjectLocProps(interfaceName), null)
}
else {
// Use the provided object
((targetLoc, new PropMap), null)
}
createdInterfacesMap.put((targetLoc, interfaceName), (locProps, protoLocProps))
initList.append(locProps)
// Bind to global object
if(!WIDLHelper.isNoInterfaceObject(interfaceNode)) {
globalProps.put(interfaceName, AbsConstValue(PropValue(ObjectValue(locProps._1, T, F, T))))
}
// Insert semantic function and return type for a constructor
val constructorName = interfaceName + ".constructor"
map_semantic+= (constructorName -> semantic)
returnTypeMap.put(constructorName, WIDLFactory.mkNamedType(interfaceName))
// Members
val i = interfaceNode.getMembers.iterator()
while(i.hasNext) {
val member = i.next().asInstanceOf[WMember]
member match {
// 4.4.5. Constants
case SWConst(info, attrs, typ, name, value) =>
val constValue = PropValue(ObjectValue(WIDLHelper.WLiteral2Value(value), F, T, F))
locProps._2.put(name, AbsConstValue(constValue))
if(protoLocProps != null) protoLocProps._2.put(name, AbsConstValue(constValue))
else if (verbose)
System.out.println("* SWConst (typ = " + typ + ", name1 = " + name + ", value = " + value + ") of \"" + interfaceName + "\" interface is not created.")
// 4.4.6. Attributes
case attribute@SWAttribute(info, attrs, typ1, name, exns) =>
WIDLHelper.WType2Value(typ1, this) match {
case Some(value) =>
val isUnforgeable = WIDLHelper.isUnforgeable(attribute)
val isWritable = AbsBool.alpha(!WIDLHelper.isReadOnly(member))
val isConfigurable = AbsBool.alpha(!isUnforgeable)
val absProp: AbsProperty = AbsConstValue(PropValue(ObjectValue(value, isWritable, T, isConfigurable)))
// If the attribute was declared with the [Unforgeable] extended attribute,
// then the property exists on every object that implements the interface.
// Otherwise, it exists on the interface’s interface prototype object.
if(isUnforgeable) {
// Implements case
if(targetLoc != -1) locProps._2.put(name, absProp)
}
else {
// Not implements case
if(targetLoc != -1) locProps._2.put(name, absProp)
else protoLocProps._2.put(name, absProp)
}
case None =>
if (verbose)
System.out.println("* SWAttribute(typ1 = " + typ1 + ", name = " + name + ", exns = " + exns + ") of \"" + interfaceName + "\" interface is not created.")
}
// 4.4.7. Operations
case operation@SWOperation(info, attrs, qualifiers, returnType, name, args, exns) =>
name match {
case Some(name) =>
// Select target object
val isCallback = WIDLHelper.isCallback(interfaceNode)
val isStatic = WIDLHelper.isStatic(operation)
val (locPropsSel, funcName) = (isCallback | isStatic) match {
case true => (locProps, interfaceName + '.' + name)
case false => (protoLocProps, interfaceName + ".prototype." + name)
}
// Argument size
val argMinSize = args.length - WIDLHelper.getOptionalParameterCount(args)
val argMaxSize = args.length
WIDLTypeMap.argSizeMap.put(funcName, (argMinSize, argMaxSize))
val containsCallback = args.exists(param => param match {
case SWArgument(_, _, t, _, _) => {
t match {
case SWNamedType(_, _, typ) => WIDLTypeMap.callbackMap.get(typ) match {
case Some(callback) => true
case _ => false
}
case _ => false
}
}
})
if (containsCallback) // if the interface has callback argument
locPropsSel._2.put(name, AbsBuiltinFuncAftercallOptional(funcName, args.length)) // T, T, T ?
else
locPropsSel._2.put(name, AbsBuiltinFunc(funcName, args.length)) // T, T, T ?
// Insert semantic function and return type
map_semantic+= (funcName -> semantic)
returnTypeMap.put(funcName, returnType)
case None =>
if (verbose)
System.out.println("* SWOperation (qualifiers + " + qualifiers + ", returnType = " + returnType + ", name = " + name + ", args = " + args + ", exns = " + exns + ") of \"" + interfaceName + "\" interface is not created.")
}
}
}
// If this interface inherits another interface
if(interfaceNode.getParent.isSome) {
val parentName = interfaceNode.getParent.unwrap().getName
if(!newLocPropsMap.contains(parentName)) createInterfaceFromName(-1, interfaceNode.getParent.unwrap().getName)
getRegisteredRecentLoc(parentName + ".prototype") match {
case Some(parentProtoLoc) => locProps._2.put("@proto", AbsConstValue(PropValue(ObjectValue(Value(parentProtoLoc), F, F, F))))
case None =>
if (verbose)
System.out.println("* \"" + parentName + ".prototype\" does not exist.")
}
}
// If this interface implements another interface
WIDLTypeMap.implementsMap.get(interfaceName) match {
case Some(implementsNodeList) => for(implementsNode <- implementsNodeList) doImplements(protoLocProps._1, implementsNode)
      case None =>
}
// Return the created object(interface)
(locProps, protoLocProps)
}
// 4.5. Implements statements
def doImplements(targetLoc: Loc, implementsNode: WImplementsStatement): Unit = {
WIDLTypeMap.interfaceMap.get(implementsNode.getParent) match {
case Some(interfaceNode) => createInterfaceFromNode(targetLoc, interfaceNode)
case None =>
if (verbose)
System.out.println("* \"" + implementsNode.getParent + "\" is not an interface.")
}
}
/**
* Note
* - "interface" and "implements" don't have cycles.
   * - We cannot represent loc-top! ("any" type and "object" type have problems... ~_~)
* - If "Window" implements some interface then the interface.prototype's properties are copied
* to "Window" not to "Window.prototype". (Temporary wrong implementation)
*/
def initialize(h: Heap): Heap = {
// 1. initialize & set WType -> Loc maps
// 1.1. initialize
type2locMap = MHashMap[String, Loc]()
functionType2locMap = MHashMap[String, Loc]()
// 1.2. map type to predefined location
WIDLDateMockupLoc = newSystemLoc("WIDLDateMockupLoc", Old)
WIDLTypeMap.interfaceMap.foreach(kv => type2locMap.put(kv._1, newSystemLoc("WIDL"+kv._1+"MockupLoc", Old)))
WIDLTypeMap.dictionaryMap.foreach(kv => type2locMap.put(kv._1, newSystemLoc("WIDL"+kv._1+"MockupLoc", Old)))
WIDLTypeMap.callbackMap.foreach(kv => functionType2locMap.put(kv._1, newSystemLoc("WIDL"+kv._1+"MockupLoc", Old)))
WIDLTypeMap.interfOperationMap.foreach(kv => functionType2locMap.put(kv._1, newSystemLoc("WIDL"+kv._1+"MockupLoc", Old)))
WIDLTypeMap.arrayMap.foreach(kv => type2locMap.put(kv._1, newSystemLoc("WIDL"+kv._1+"MockupLoc", Old)))
// 2. update object mockups (date, interface, dictionary, array)
// 2.1. update a date mockup
val h_0 = h.update(WIDLDateMockupLoc, mkDateObj)
// 2.2. update interface mockups
val h_1 = WIDLTypeMap.interfaceMap.foldLeft(h_0)((_h, kv) => {
val name = kv._1
val typ = kv._2
_h.update(type2locMap(name), mkInterfaceObj(typ))
})
// 2.3. update dictionary mockups
val h_2 = WIDLTypeMap.dictionaryMap.foldLeft(h_1)((_h, kv) => {
val name = kv._1
val typ = kv._2
_h.update(type2locMap(name), mkDictionaryObj(typ))
})
val h_3 = WIDLTypeMap.arrayMap.foldLeft(h_2)((_h, kv) => {
val name = kv._1
val typ = kv._2
_h.update(type2locMap(name), mkArrayObj(typ))
})
// 3. update function mockups
// 3.1. update callback function mockups
WIDLTypeMap.callbackMap.foreach(kv => {
val name: String = kv._1
val callback: WCallback = kv._2
val loc: Loc = functionType2locMap(name)
val locprop: LocPropMap = (loc, mkCallbackProps(callback))
initList.append(locprop)
})
// 3.2. update interface operation mockups
WIDLTypeMap.interfOperationMap.foreach(kv => {
val name: String = kv._1
val interf: String = name.split('.')(0)
val op: WOperation = kv._2
val loc: Loc = functionType2locMap(name)
val locprop: LocPropMap = (loc, mkInterfOpProps(interf, op))
op match {
case operation@SWOperation(_, _, _, returnType, opname, args, _) =>
opname match {
case Some(opname) =>
// Argument size
val argMinSize = args.length - WIDLHelper.getOptionalParameterCount(args)
val argMaxSize = args.length
WIDLTypeMap.argSizeMap.put(name, (argMinSize, argMaxSize))
val containsCallback = args.exists(param => param match {
case SWArgument(_, _, t, _, _) => {
t match {
case SWNamedType(_, _, typ) => WIDLTypeMap.callbackMap.get(typ) match {
case Some(callback) => true
case _ => false
}
case _ => false
}
}
})
if (containsCallback) // if the interface has callback argument
locprop._2.put("@function", AbsInternalFuncAftercallOptional(name)) // T, T, T ?
else
locprop._2.put("@function", AbsInternalFunc(name)) // T, T, T ?
// Insert semantic function and return type
              map_semantic += (name -> semantic)
returnTypeMap.put(name, returnType)
case None =>
}
}
initList.append(locprop)
})
// 4. create interfaces except for no interface objects
// Top-down from "Window" object
WIDLTypeMap.implementsMap.get("Window") match {
case Some(implementsNodeList) => for(implementsNode <- implementsNodeList) doImplements(GlobalLoc, implementsNode)
case None =>
}
// Bind interfaces to "Window" object directly
for((interfaceName, interfaceNode) <- WIDLTypeMap.interfaceMap) {
if(!WIDLHelper.isNoInterfaceObject(interfaceNode)) createInterfaceFromNode(-1, interfaceNode)
}
if(globalProps.size > 0) initList.append((GlobalLoc, globalProps))
////////////////////////////////////////////////////////////////////////////////
// Initialize Heap
////////////////////////////////////////////////////////////////////////////////
applyInitList(h_3)
}
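  // Reverse lookup from an abstract location to the name of the WIDL type it models:
  // first consult the predefined mock-up locations, then the interface objects created
  // during analysis, whose names carry a numeric instance suffix that is stripped off.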
def getTypeFromLoc(loc: Loc): Option[String] = (type2locMap.find(kv => kv._2 == loc) match {
case Some(kv) => Some(kv._1)
case _ => None
}) match {
case Some(str) => Some(str)
case _ => newLocPropsMap.find(kv => kv._2._1 == loc) match {
case Some(kv) =>
// Assumption: there's no type name which ends with decimal digits
var cntN = 0
while ('0' <= kv._1.charAt(kv._1.size - cntN - 1) && kv._1.charAt(kv._1.size - cntN - 1) <= '9') {
cntN = cntN + 1
}
Some(kv._1.dropRight(cntN))
case _ => None
}
}
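  // The members below expose the collected maps and provide default implementations
  // for asynchronous-call modeling, which is not used for WIDL: the async hooks simply
  // return empty results or bottom values.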
def addAsyncCall(cfg: CFG, loop_head: CNode): (List[CNode],List[CNode]) = (List(), List())
def isModelFid(fid: FunctionId) = map_fid.contains(fid)
def getFIdMap(): Map[FunctionId, String] = map_fid
def getSemanticMap(): Map[String, SemanticFun] = map_semantic
def getPreSemanticMap(): Map[String, SemanticFun] = map_presemantic
def getDefMap(): Map[String, AccessFun] = map_def
def getUseMap(): Map[String, AccessFun] = map_use
def asyncSemantic(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG,
name: String, list_addr: List[Address]): ((Heap, Context), (Heap, Context)) = {
((HeapBot, ContextBot),(HeapBot, ContextBot))
}
def asyncPreSemantic(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG,
name: String, list_addr: List[Address]): (Heap, Context) = {
(HeapBot, ContextBot)
}
def asyncDef(h: Heap, ctx: Context, cfg: CFG, name: String, list_addr: List[Address]): LPSet = LPBot
def asyncUse(h: Heap, ctx: Context, cfg: CFG, name: String, list_addr: List[Address]): LPSet = LPBot
def asyncCallgraph(h: Heap, inst: CFGInst, map: Map[CFGInst, Set[FunctionId]],
name: String, list_addr: List[Address]): Map[CFGInst, Set[FunctionId]] = Map()
}
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/widl/WIDLModel.scala | Scala | bsd-3-clause | 37,021 |
/**
* This file is part of the "eidolon" project.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the LICENSE is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
import sbt.Keys._
import sbt.{Build => BaseBuild, _}
import spray.revolver.RevolverPlugin._
/**
* Main build file
*
* @author Elliot Wright <[email protected]>
*/
object Build extends BaseBuild {
import Dependencies._
lazy val commonSettings = Seq(
organization := "space.eidolon",
version := "0.1.0-SNAPSHOT",
scalaVersion := "2.11.6",
resolvers ++= Dependencies.repositories
)
lazy val root = (project in file("."))
.settings(commonSettings: _*)
.settings(name := "eidolon")
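    // sbt-revolver settings: add commands for starting/stopping the application
    // in a forked JVM during development.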
.settings(Revolver.settings)
.dependsOn(container)
lazy val container = (project in file("components/eidolon-container"))
.settings(commonSettings: _*)
.settings(name := "component.container")
.settings(libraryDependencies ++=
test(scalaTest)
)
lazy val config = (project in file("components/eidolon-config"))
.settings(commonSettings: _*)
.settings(name := "component.config")
.settings(libraryDependencies ++=
test(scalaTest)
)
lazy val router = (project in file("components/eidolon-router"))
.settings(commonSettings: _*)
.settings(name := "component.router")
.settings(libraryDependencies ++=
compile(akkaActor, scalaAsync) ++
test(scalaTest)
)
}
| eidolon/eidolon-scala | project/Build.scala | Scala | mit | 1,784 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.io.File
import java.util.Properties
import java.util.concurrent.atomic.AtomicBoolean
import kafka.cluster.Partition
import kafka.log.{Log, LogManager}
import kafka.server.QuotaFactory.QuotaManagers
import kafka.server.metadata.CachedConfigRepository
import kafka.utils.TestUtils.MockAlterIsrManager
import kafka.utils._
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.utils.Time
import org.easymock.EasyMock
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{AfterEach, BeforeEach, Test}
import scala.collection.Seq
import scala.collection.mutable.{HashMap, Map}
class IsrExpirationTest {
var topicPartitionIsr: Map[(String, Int), Seq[Int]] = new HashMap[(String, Int), Seq[Int]]()
val replicaLagTimeMaxMs = 100L
val replicaFetchWaitMaxMs = 100
val leaderLogEndOffset = 20
val leaderLogHighWatermark = 20L
val overridingProps = new Properties()
overridingProps.put(KafkaConfig.ReplicaLagTimeMaxMsProp, replicaLagTimeMaxMs.toString)
overridingProps.put(KafkaConfig.ReplicaFetchWaitMaxMsProp, replicaFetchWaitMaxMs.toString)
val configs = TestUtils.createBrokerConfigs(2, TestUtils.MockZkConnect).map(KafkaConfig.fromProps(_, overridingProps))
val topic = "foo"
val time = new MockTime
val metrics = new Metrics
var quotaManager: QuotaManagers = null
var replicaManager: ReplicaManager = null
var alterIsrManager: MockAlterIsrManager = _
@BeforeEach
def setUp(): Unit = {
val logManager: LogManager = EasyMock.createMock(classOf[LogManager])
EasyMock.expect(logManager.liveLogDirs).andReturn(Array.empty[File]).anyTimes()
EasyMock.replay(logManager)
alterIsrManager = TestUtils.createAlterIsrManager()
quotaManager = QuotaFactory.instantiate(configs.head, metrics, time, "")
replicaManager = new ReplicaManager(configs.head, metrics, time, None, null, logManager, new AtomicBoolean(false),
quotaManager, new BrokerTopicStats, MetadataCache.zkMetadataCache(configs.head.brokerId),
new LogDirFailureChannel(configs.head.logDirs.size), alterIsrManager, new CachedConfigRepository())
}
@AfterEach
def tearDown(): Unit = {
Option(replicaManager).foreach(_.shutdown(false))
Option(quotaManager).foreach(_.shutdown())
metrics.close()
}
/*
* Test the case where a follower is caught up but stops making requests to the leader. Once beyond the configured time limit, it should fall out of ISR
*/
@Test
def testIsrExpirationForStuckFollowers(): Unit = {
val log = logMock
// create one partition and all replicas
val partition0 = getPartitionWithAllReplicasInIsr(topic, 0, time, configs.head, log)
assertEquals(configs.map(_.brokerId).toSet, partition0.inSyncReplicaIds, "All replicas should be in ISR")
// let the follower catch up to the Leader logEndOffset - 1
for (replica <- partition0.remoteReplicas)
replica.updateFetchState(
followerFetchOffsetMetadata = LogOffsetMetadata(leaderLogEndOffset - 1),
followerStartOffset = 0L,
        followerFetchTimeMs = time.milliseconds,
leaderEndOffset = leaderLogEndOffset)
var partition0OSR = partition0.getOutOfSyncReplicas(configs.head.replicaLagTimeMaxMs)
assertEquals(Set.empty[Int], partition0OSR, "No replica should be out of sync")
// let some time pass
time.sleep(150)
// now follower hasn't pulled any data for > replicaMaxLagTimeMs ms. So it is stuck
partition0OSR = partition0.getOutOfSyncReplicas(configs.head.replicaLagTimeMaxMs)
assertEquals(Set(configs.last.brokerId), partition0OSR, "Replica 1 should be out of sync")
EasyMock.verify(log)
}
/*
* Test the case where a follower never makes a fetch request. It should fall out of ISR because it will be declared stuck
*/
@Test
def testIsrExpirationIfNoFetchRequestMade(): Unit = {
val log = logMock
// create one partition and all replicas
val partition0 = getPartitionWithAllReplicasInIsr(topic, 0, time, configs.head, log)
assertEquals(configs.map(_.brokerId).toSet, partition0.inSyncReplicaIds, "All replicas should be in ISR")
// Let enough time pass for the replica to be considered stuck
time.sleep(150)
val partition0OSR = partition0.getOutOfSyncReplicas(configs.head.replicaLagTimeMaxMs)
assertEquals(Set(configs.last.brokerId), partition0OSR, "Replica 1 should be out of sync")
EasyMock.verify(log)
}
/*
* Test the case where a follower continually makes fetch requests but is unable to catch up. It should fall out of the ISR
* However, any time it makes a request to the LogEndOffset it should be back in the ISR
*/
@Test
def testIsrExpirationForSlowFollowers(): Unit = {
// create leader replica
val log = logMock
// add one partition
val partition0 = getPartitionWithAllReplicasInIsr(topic, 0, time, configs.head, log)
assertEquals(configs.map(_.brokerId).toSet, partition0.inSyncReplicaIds, "All replicas should be in ISR")
    // Make the remote replica not read to the end of the log. It should not be out of sync for at least 100 ms
for (replica <- partition0.remoteReplicas)
replica.updateFetchState(
followerFetchOffsetMetadata = LogOffsetMetadata(leaderLogEndOffset - 2),
followerStartOffset = 0L,
        followerFetchTimeMs = time.milliseconds,
leaderEndOffset = leaderLogEndOffset)
// Simulate 2 fetch requests spanning more than 100 ms which do not read to the end of the log.
// The replicas will no longer be in ISR. We do 2 fetches because we want to simulate the case where the replica is lagging but is not stuck
var partition0OSR = partition0.getOutOfSyncReplicas(configs.head.replicaLagTimeMaxMs)
assertEquals(Set.empty[Int], partition0OSR, "No replica should be out of sync")
time.sleep(75)
partition0.remoteReplicas.foreach { r =>
r.updateFetchState(
followerFetchOffsetMetadata = LogOffsetMetadata(leaderLogEndOffset - 1),
followerStartOffset = 0L,
        followerFetchTimeMs = time.milliseconds,
leaderEndOffset = leaderLogEndOffset)
}
partition0OSR = partition0.getOutOfSyncReplicas(configs.head.replicaLagTimeMaxMs)
assertEquals(Set.empty[Int], partition0OSR, "No replica should be out of sync")
time.sleep(75)
// The replicas will no longer be in ISR
partition0OSR = partition0.getOutOfSyncReplicas(configs.head.replicaLagTimeMaxMs)
assertEquals(Set(configs.last.brokerId), partition0OSR, "Replica 1 should be out of sync")
// Now actually make a fetch to the end of the log. The replicas should be back in ISR
partition0.remoteReplicas.foreach { r =>
r.updateFetchState(
followerFetchOffsetMetadata = LogOffsetMetadata(leaderLogEndOffset),
followerStartOffset = 0L,
        followerFetchTimeMs = time.milliseconds,
leaderEndOffset = leaderLogEndOffset)
}
partition0OSR = partition0.getOutOfSyncReplicas(configs.head.replicaLagTimeMaxMs)
assertEquals(Set.empty[Int], partition0OSR, "No replica should be out of sync")
EasyMock.verify(log)
}
/*
   * Test the case where a follower has already caught up to the same log end offset as the leader. This follower should not be considered out-of-sync
*/
@Test
def testIsrExpirationForCaughtUpFollowers(): Unit = {
val log = logMock
// create one partition and all replicas
val partition0 = getPartitionWithAllReplicasInIsr(topic, 0, time, configs.head, log)
assertEquals(configs.map(_.brokerId).toSet, partition0.inSyncReplicaIds, "All replicas should be in ISR")
// let the follower catch up to the Leader logEndOffset
for (replica <- partition0.remoteReplicas)
replica.updateFetchState(
followerFetchOffsetMetadata = LogOffsetMetadata(leaderLogEndOffset),
followerStartOffset = 0L,
        followerFetchTimeMs = time.milliseconds,
leaderEndOffset = leaderLogEndOffset)
var partition0OSR = partition0.getOutOfSyncReplicas(configs.head.replicaLagTimeMaxMs)
assertEquals(Set.empty[Int], partition0OSR, "No replica should be out of sync")
// let some time pass
time.sleep(150)
// even though follower hasn't pulled any data for > replicaMaxLagTimeMs ms, the follower has already caught up. So it is not out-of-sync.
partition0OSR = partition0.getOutOfSyncReplicas(configs.head.replicaLagTimeMaxMs)
assertEquals(Set.empty[Int], partition0OSR, "No replica should be out of sync")
EasyMock.verify(log)
}
private def getPartitionWithAllReplicasInIsr(topic: String, partitionId: Int, time: Time, config: KafkaConfig,
localLog: Log): Partition = {
val leaderId = config.brokerId
val tp = new TopicPartition(topic, partitionId)
val partition = replicaManager.createPartition(tp)
partition.setLog(localLog, isFutureLog = false)
partition.updateAssignmentAndIsr(
assignment = configs.map(_.brokerId),
isr = configs.map(_.brokerId).toSet,
addingReplicas = Seq.empty,
removingReplicas = Seq.empty
)
// set lastCaughtUpTime to current time
for (replica <- partition.remoteReplicas)
replica.updateFetchState(
followerFetchOffsetMetadata = LogOffsetMetadata(0L),
followerStartOffset = 0L,
        followerFetchTimeMs = time.milliseconds,
leaderEndOffset = 0L)
// set the leader and its hw and the hw update time
partition.leaderReplicaIdOpt = Some(leaderId)
partition
}
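  // Builds a Log mock pinned at the configured leader log end offset and high watermark;
  // only the accessors touched by Partition in these tests are stubbed.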
private def logMock: Log = {
val log: Log = EasyMock.createMock(classOf[Log])
EasyMock.expect(log.dir).andReturn(TestUtils.tempDir()).anyTimes()
EasyMock.expect(log.logEndOffsetMetadata).andReturn(LogOffsetMetadata(leaderLogEndOffset)).anyTimes()
EasyMock.expect(log.logEndOffset).andReturn(leaderLogEndOffset).anyTimes()
EasyMock.expect(log.highWatermark).andReturn(leaderLogHighWatermark).anyTimes()
EasyMock.replay(log)
log
}
}
| Chasego/kafka | core/src/test/scala/unit/kafka/server/IsrExpirationTest.scala | Scala | apache-2.0 | 10,858 |
/*
* Copyright (c) 2014 koiroha.org.
* All sources and related resources are available under Apache License 2.0.
* http://www.apache.org/licenses/LICENSE-2.0.html
*/
package org.asterisque.netty
import java.util
import io.netty.bootstrap.ServerBootstrap
import io.netty.buffer.ByteBuf
import io.netty.channel.{ChannelHandlerContext, ChannelInitializer, ChannelOption}
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.SocketChannel
import io.netty.channel.socket.nio.NioServerSocketChannel
import io.netty.handler.codec.ByteToMessageDecoder
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// NettySample
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
/**
* @author Takami Torao
*/
object NettySample {
def main(args:Array[String]):Unit = {
val master = new NioEventLoopGroup()
val worker = new NioEventLoopGroup()
val port = 7263
val server = new ServerBootstrap()
.group(master, worker)
.channel(classOf[NioServerSocketChannel])
.localAddress(port)
.option(ChannelOption.SO_BACKLOG, java.lang.Integer.valueOf(100))
.childOption(ChannelOption.TCP_NODELAY, java.lang.Boolean.TRUE)
.childHandler(new ChannelInitializer[SocketChannel]() {
override def initChannel(ch:SocketChannel):Unit = {
val pipeline = ch.pipeline()
pipeline.addLast("c", new ByteToMessageDecoder {
override def decode(ctx:ChannelHandlerContext, in:ByteBuf, out:util.List[AnyRef]):Unit = {
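            // Intentionally left empty: this sample only wires up the server bootstrap
            // and pipeline; incoming bytes are not framed or decoded.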
}
})
}
})
server.bind(port)
}
}
| torao/asterisque | core-scala/src/test/scala/org/asterisque/netty/NettySample.scala | Scala | apache-2.0 | 1,615 |
package au.com.dius.pact.matchers
import au.com.dius.pact.model.{BodyMismatch, BodyMismatchFactory}
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class MinimumMatcherTest extends Specification {
val matcherDef = Map("min" -> 2, "match" -> "type")
"with an array" should {
val path: Seq[String] = Seq("$", "body", "animals", "0", "children")
"match if the array is larger" in {
MinimumMatcher.domatch[BodyMismatch](matcherDef, path, List(1, 2), List(1, 2, 3), BodyMismatchFactory) must beEmpty
}
"match if the array is the correct size" in {
MinimumMatcher.domatch[BodyMismatch](matcherDef, path, List(1, 2), List(1, 3), BodyMismatchFactory) must beEmpty
}
"not match if the array is smaller" in {
MinimumMatcher.domatch[BodyMismatch](matcherDef, path, List(1, 2), List(1), BodyMismatchFactory) must not(beEmpty)
}
}
"with a non-array" should {
val path = Seq("$", "body", "animals", "0", "children", "0")
"default to type matcher" in {
MinimumMatcher.domatch[BodyMismatch](matcherDef, path, "Fred", "George", BodyMismatchFactory) must beEmpty
MinimumMatcher.domatch[BodyMismatch](matcherDef, path, "Fred", 100, BodyMismatchFactory) must not(beEmpty)
}
}
}
| flaregames/pact-jvm | pact-jvm-matchers/src/test/scala/au/com/dius/pact/matchers/MinimumMatcherTest.scala | Scala | apache-2.0 | 1,352 |
package gitbucket.core.controller
import gitbucket.core.dashboard.html
import gitbucket.core.service._
import gitbucket.core.util.{Keys, UsersAuthenticator}
import gitbucket.core.util.Implicits._
import gitbucket.core.service.IssuesService._
class DashboardController
extends DashboardControllerBase
with IssuesService
with PullRequestService
with RepositoryService
with AccountService
with ActivityService
with CommitsService
with LabelsService
with PrioritiesService
with WebHookService
with WebHookPullRequestService
with WebHookPullRequestReviewCommentService
with MilestonesService
with UsersAuthenticator
trait DashboardControllerBase extends ControllerBase {
self: IssuesService with PullRequestService with RepositoryService with AccountService with UsersAuthenticator =>
get("/dashboard/repos")(usersOnly {
val repos = getVisibleRepositories(context.loginAccount, withoutPhysicalInfo = true)
html.repos(getGroupNames(context.loginAccount.get.userName), repos, repos)
})
get("/dashboard/issues")(usersOnly {
searchIssues("created_by")
})
get("/dashboard/issues/assigned")(usersOnly {
searchIssues("assigned")
})
get("/dashboard/issues/created_by")(usersOnly {
searchIssues("created_by")
})
get("/dashboard/issues/mentioned")(usersOnly {
searchIssues("mentioned")
})
get("/dashboard/pulls")(usersOnly {
searchPullRequests("created_by")
})
get("/dashboard/pulls/created_by")(usersOnly {
searchPullRequests("created_by")
})
get("/dashboard/pulls/assigned")(usersOnly {
searchPullRequests("assigned")
})
get("/dashboard/pulls/mentioned")(usersOnly {
searchPullRequests("mentioned")
})
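  // Builds an IssueSearchCondition from the request parameters and narrows it to the
  // requested filter (assigned / mentioned / created_by). The session-key parameter is
  // currently unused.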
private def getOrCreateCondition(key: String, filter: String, userName: String) = {
val condition = IssueSearchCondition(request)
filter match {
case "assigned" => condition.copy(assigned = Some(Some(userName)), author = None, mentioned = None)
case "mentioned" => condition.copy(assigned = None, author = None, mentioned = Some(userName))
case _ => condition.copy(assigned = None, author = Some(userName), mentioned = None)
}
}
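  // Renders the dashboard issue list for the signed-in user: searches issues across the
  // user's repositories, pages through the results and counts open/closed issues for the
  // selected filter.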
private def searchIssues(filter: String) = {
import IssuesService._
val userName = context.loginAccount.get.userName
val condition = getOrCreateCondition(Keys.Session.DashboardIssues, filter, userName)
val userRepos = getUserRepositories(userName, true).map(repo => repo.owner -> repo.name)
val page = IssueSearchCondition.page(request)
html.issues(
searchIssue(condition, false, (page - 1) * IssueLimit, IssueLimit, userRepos: _*),
page,
countIssue(condition.copy(state = "open"), false, userRepos: _*),
countIssue(condition.copy(state = "closed"), false, userRepos: _*),
filter match {
case "assigned" => condition.copy(assigned = Some(Some(userName)))
case "mentioned" => condition.copy(mentioned = Some(userName))
case _ => condition.copy(author = Some(userName))
},
filter,
getGroupNames(userName),
getVisibleRepositories(context.loginAccount, withoutPhysicalInfo = true)
)
}
private def searchPullRequests(filter: String) = {
import IssuesService._
import PullRequestService._
val userName = context.loginAccount.get.userName
val condition = getOrCreateCondition(Keys.Session.DashboardPulls, filter, userName)
val allRepos = getAllRepositories(userName)
val page = IssueSearchCondition.page(request)
html.pulls(
searchIssue(condition, true, (page - 1) * PullRequestLimit, PullRequestLimit, allRepos: _*),
page,
countIssue(condition.copy(state = "open"), true, allRepos: _*),
countIssue(condition.copy(state = "closed"), true, allRepos: _*),
filter match {
case "assigned" => condition.copy(assigned = Some(Some(userName)))
case "mentioned" => condition.copy(mentioned = Some(userName))
case _ => condition.copy(author = Some(userName))
},
filter,
getGroupNames(userName),
getVisibleRepositories(context.loginAccount, withoutPhysicalInfo = true)
)
}
}
| mann-ed/gitbucket | src/main/scala/gitbucket/core/controller/DashboardController.scala | Scala | apache-2.0 | 4,221 |
package model.form.data
/**
* @author Benjamin R. White <[email protected]>
*/
case class BlockUserFormData(username: String)
| ProjectAton/AtonLab | app/model/form/data/BlockUserFormData.scala | Scala | gpl-3.0 | 125 |
/*
* Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.openjdk.jmh.samples
import org.openjdk.jmh.annotations._
import org.openjdk.jmh.infra.{BenchmarkParams, ThreadParams}
import java.util.ArrayList
import java.util.List
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.TimeUnit
import scala.collection.JavaConverters._
object JMHSample_31_InfraParams {
final val THREAD_SLICE = 1000
/*
* Here is another neat trick. Generate the distinct set of keys for all threads:
*/
@State(Scope.Thread)
class Ids {
private[samples] var ids: List[String] = _
@Setup
def setup(threads: ThreadParams) {
ids = new ArrayList[String]
var c = 0
while (c < THREAD_SLICE) {
ids.add("ID" + (THREAD_SLICE * threads.getThreadIndex() + c))
c += 1
}
}
}
}
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
@State(Scope.Benchmark)
class JMHSample_31_InfraParams {
import JMHSample_31_InfraParams._
/*
* There is a way to query JMH about the current running mode. This is
* possible with three infrastructure objects we can request to be injected:
* - BenchmarkParams: covers the benchmark-global configuration
* - IterationParams: covers the current iteration configuration
* - ThreadParams: covers the specifics about threading
*
* Suppose we want to check how the ConcurrentHashMap scales under different
* parallelism levels. We can put concurrencyLevel in @Param, but it sometimes
* inconvenient if, say, we want it to follow the @Threads count. Here is
* how we can query JMH about how many threads was requested for the current run,
* and put that into concurrencyLevel argument for CHM constructor.
*/
private var mapSingle: ConcurrentHashMap[String, String] = _
private var mapFollowThreads: ConcurrentHashMap[String, String] = _
@Setup
def setup(params: BenchmarkParams) {
val capacity = 16 * THREAD_SLICE * params.getThreads
mapSingle = new ConcurrentHashMap[String, String](capacity, 0.75f, 1)
mapFollowThreads = new ConcurrentHashMap[String, String](capacity, 0.75f, params.getThreads)
}
@Benchmark
def measureDefault(ids: Ids) {
for (s <- ids.ids.asScala) {
mapSingle.remove(s)
mapSingle.put(s, s)
}
}
@Benchmark
def measureFollowThreads(ids: Ids) {
for (s <- ids.ids.asScala) {
mapFollowThreads.remove(s)
mapFollowThreads.put(s, s)
}
}
}
| bantonsson/sbt-jmh | src/sbt-test/sbt-jmh/run/src/main/scala/org/openjdk/jmh/samples/JMHSample_31_InfraParams.scala | Scala | apache-2.0 | 3,653 |
/*
* Copyright 2010-2011 Vilius Normantas <[email protected]>
*
* This file is part of Crossbow library.
*
* Crossbow is free software: you can redistribute it and/or modify it under the terms of the GNU
* General Public License as published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* Crossbow is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
* even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with Crossbow. If not,
* see <http://www.gnu.org/licenses/>.
*/
package lt.norma.crossbow.indicators
import lt.norma.crossbow.core.IndicatorList
import lt.norma.crossbow.messages.{ BarClose, EmptyMessage, SessionClose, SessionOpen }
import org.joda.time.DateTime
import org.scalatest.FunSuite
class SessionBarCountTest extends FunSuite {
test("name") {
val indicator = new SessionBarCount
expect("Session Bar Count") {
indicator.name
}
}
test("dependencies") {
val indicator = new SessionBarCount
expect(Set()) {
indicator.dependencies
}
}
test("default value") {
val indicator = new SessionBarCount
expect(Some(0)) {
indicator()
}
}
test("calculation") {
val indicator = new SessionBarCount
val list = new IndicatorList(indicator)
expect(Some(0)) {
indicator()
}
list.send(BarClose(new DateTime))
expect(Some(1)) {
indicator()
}
list.send(EmptyMessage)
list.send(BarClose(new DateTime))
expect(Some(2)) {
indicator()
}
indicator.unset()
expect(Some(0)) {
indicator()
}
list.send(BarClose(new DateTime))
list.send(BarClose(new DateTime))
list.send(BarClose(new DateTime))
list.send(BarClose(new DateTime))
list.send(BarClose(new DateTime))
expect(Some(5)) {
indicator()
}
list.send(SessionClose(new DateTime))
expect(Some(5)) {
indicator()
}
list.send(BarClose(new DateTime))
expect(Some(6)) {
indicator()
}
list.send(SessionOpen(new DateTime))
expect(Some(0)) {
indicator()
}
list.send(BarClose(new DateTime))
expect(Some(1)) {
indicator()
}
}
}
| ViliusN/Crossbow | crossbow-core/test/lt/norma/crossbow/indicators/SessionBarCountTest.scala | Scala | gpl-3.0 | 2,384 |
package net.rrm.ehour.timesheet.dto
import org.scalatest.{Matchers, WordSpec}
import net.rrm.ehour.domain.{TimesheetEntry, TimesheetEntryObjectMother, ProjectAssignmentObjectMother}
import java.{util => ju}
import org.joda.time.LocalDate
class WeekOverviewSpec extends WordSpec with Matchers {
"Week Overview" should {
"have two assignments with 1 timesheet entry each" in {
val entry1 = TimesheetEntryObjectMother.createTimesheetEntry(1, LocalDate.parse("20131108").toDate, 5f)
val entry2 = TimesheetEntryObjectMother.createTimesheetEntry(2, LocalDate.parse("20131109").toDate, 3f)
val overview = new WeekOverview(ju.Arrays.asList(entry1, entry2), ju.Arrays.asList())
overview.getAssignmentMap should have size 2
val entriesOnDate = overview.getAssignmentMap.get(entry1.getEntryId.getProjectAssignment)
entriesOnDate should have size 1
val entry: TimesheetEntry = entriesOnDate.get(overview.formatter.format(entry1.getEntryId.getEntryDate))
entry.getHours should be (5f)
}
"have one assignments with 2 timesheet entries" in {
val entry1 = TimesheetEntryObjectMother.createTimesheetEntry(1, LocalDate.parse("20131108").toDate, 5f)
val entry2 = TimesheetEntryObjectMother.createTimesheetEntry(1, LocalDate.parse("20131109").toDate, 3f)
val overview = new WeekOverview(ju.Arrays.asList(entry1, entry2), ju.Arrays.asList())
overview.getAssignmentMap should have size 1
val entriesOnDate = overview.getAssignmentMap.get(entry1.getEntryId.getProjectAssignment)
entriesOnDate should have size 2
val entry: TimesheetEntry = entriesOnDate.get(overview.formatter.format(entry2.getEntryId.getEntryDate))
entry.getHours should be (3f)
}
"merge assignments without entries" in {
val assignment = ProjectAssignmentObjectMother.createProjectAssignment(3)
val entry1 = TimesheetEntryObjectMother.createTimesheetEntry(1, LocalDate.parse("20131108").toDate, 5f)
val entry2 = TimesheetEntryObjectMother.createTimesheetEntry(2, LocalDate.parse("20131109").toDate, 3f)
val overview = new WeekOverview(ju.Arrays.asList(entry1, entry2), ju.Arrays.asList(assignment))
overview.getAssignmentMap should have size 3
}
}
}
| momogentoo/ehour | eHour-common/src/test/scala/net/rrm/ehour/timesheet/dto/WeekOverviewSpec.scala | Scala | gpl-2.0 | 2,266 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.clustering
import breeze.linalg.{DenseVector => BDV}
import org.apache.hadoop.fs.Path
import org.apache.spark.annotation.{Experimental, Since}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.ml.{Estimator, Model}
import org.apache.spark.ml.impl.Utils.EPSILON
import org.apache.spark.ml.linalg._
import org.apache.spark.ml.param._
import org.apache.spark.ml.param.shared._
import org.apache.spark.ml.stat.distribution.MultivariateGaussian
import org.apache.spark.ml.util._
import org.apache.spark.ml.util.Instrumentation.instrumented
import org.apache.spark.mllib.linalg.{Matrices => OldMatrices, Matrix => OldMatrix,
Vector => OldVector, Vectors => OldVectors}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.sql.functions.udf
import org.apache.spark.sql.types.{IntegerType, StructType}
/**
* Common params for GaussianMixture and GaussianMixtureModel
*/
private[clustering] trait GaussianMixtureParams extends Params with HasMaxIter with HasFeaturesCol
with HasSeed with HasPredictionCol with HasProbabilityCol with HasTol {
/**
* Number of independent Gaussians in the mixture model. Must be greater than 1. Default: 2.
*
* @group param
*/
@Since("2.0.0")
final val k = new IntParam(this, "k", "Number of independent Gaussians in the mixture model. " +
"Must be > 1.", ParamValidators.gt(1))
/** @group getParam */
@Since("2.0.0")
def getK: Int = $(k)
/**
* Validates and transforms the input schema.
*
* @param schema input schema
* @return output schema
*/
protected def validateAndTransformSchema(schema: StructType): StructType = {
SchemaUtils.validateVectorCompatibleColumn(schema, getFeaturesCol)
val schemaWithPredictionCol = SchemaUtils.appendColumn(schema, $(predictionCol), IntegerType)
SchemaUtils.appendColumn(schemaWithPredictionCol, $(probabilityCol), new VectorUDT)
}
}
/**
* Multivariate Gaussian Mixture Model (GMM) consisting of k Gaussians, where points
* are drawn from each Gaussian i with probability weights(i).
*
* @param weights Weight for each Gaussian distribution in the mixture.
* This is a multinomial probability distribution over the k Gaussians,
* where weights(i) is the weight for Gaussian i, and weights sum to 1.
* @param gaussians Array of `MultivariateGaussian` where gaussians(i) represents
* the Multivariate Gaussian (Normal) Distribution for Gaussian i
*/
@Since("2.0.0")
class GaussianMixtureModel private[ml] (
@Since("2.0.0") override val uid: String,
@Since("2.0.0") val weights: Array[Double],
@Since("2.0.0") val gaussians: Array[MultivariateGaussian])
extends Model[GaussianMixtureModel] with GaussianMixtureParams with MLWritable {
/** @group setParam */
@Since("2.1.0")
def setFeaturesCol(value: String): this.type = set(featuresCol, value)
/** @group setParam */
@Since("2.1.0")
def setPredictionCol(value: String): this.type = set(predictionCol, value)
/** @group setParam */
@Since("2.1.0")
def setProbabilityCol(value: String): this.type = set(probabilityCol, value)
@Since("2.0.0")
override def copy(extra: ParamMap): GaussianMixtureModel = {
val copied = copyValues(new GaussianMixtureModel(uid, weights, gaussians), extra)
copied.setSummary(trainingSummary).setParent(this.parent)
}
@Since("2.0.0")
override def transform(dataset: Dataset[_]): DataFrame = {
transformSchema(dataset.schema, logging = true)
val predUDF = udf((vector: Vector) => predict(vector))
val probUDF = udf((vector: Vector) => predictProbability(vector))
dataset
.withColumn($(predictionCol), predUDF(DatasetUtils.columnToVector(dataset, getFeaturesCol)))
.withColumn($(probabilityCol), probUDF(DatasetUtils.columnToVector(dataset, getFeaturesCol)))
}
@Since("2.0.0")
override def transformSchema(schema: StructType): StructType = {
validateAndTransformSchema(schema)
}
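  // Hard assignment: the index of the mixture component with the largest responsibility.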
private[clustering] def predict(features: Vector): Int = {
val r = predictProbability(features)
r.argmax
}
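  // Soft assignment: per-component responsibilities (normalized weighted densities)
  // for a single data point.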
private[clustering] def predictProbability(features: Vector): Vector = {
val probs: Array[Double] =
GaussianMixtureModel.computeProbabilities(features.asBreeze.toDenseVector, gaussians, weights)
Vectors.dense(probs)
}
/**
* Retrieve Gaussian distributions as a DataFrame.
* Each row represents a Gaussian Distribution.
* Two columns are defined: mean and cov.
* Schema:
* {{{
* root
* |-- mean: vector (nullable = true)
* |-- cov: matrix (nullable = true)
* }}}
*/
@Since("2.0.0")
def gaussiansDF: DataFrame = {
val modelGaussians = gaussians.map { gaussian =>
(OldVectors.fromML(gaussian.mean), OldMatrices.fromML(gaussian.cov))
}
SparkSession.builder().getOrCreate().createDataFrame(modelGaussians).toDF("mean", "cov")
}
/**
* Returns a [[org.apache.spark.ml.util.MLWriter]] instance for this ML instance.
*
* For [[GaussianMixtureModel]], this does NOT currently save the training [[summary]].
* An option to save [[summary]] may be added in the future.
*
*/
@Since("2.0.0")
override def write: MLWriter = new GaussianMixtureModel.GaussianMixtureModelWriter(this)
private var trainingSummary: Option[GaussianMixtureSummary] = None
private[clustering] def setSummary(summary: Option[GaussianMixtureSummary]): this.type = {
this.trainingSummary = summary
this
}
/**
* Return true if there exists summary of model.
*/
@Since("2.0.0")
def hasSummary: Boolean = trainingSummary.nonEmpty
/**
* Gets summary of model on training set. An exception is
* thrown if `trainingSummary == None`.
*/
@Since("2.0.0")
def summary: GaussianMixtureSummary = trainingSummary.getOrElse {
throw new RuntimeException(
s"No training summary available for the ${this.getClass.getSimpleName}")
}
}
@Since("2.0.0")
object GaussianMixtureModel extends MLReadable[GaussianMixtureModel] {
@Since("2.0.0")
override def read: MLReader[GaussianMixtureModel] = new GaussianMixtureModelReader
@Since("2.0.0")
override def load(path: String): GaussianMixtureModel = super.load(path)
/** [[MLWriter]] instance for [[GaussianMixtureModel]] */
private[GaussianMixtureModel] class GaussianMixtureModelWriter(
instance: GaussianMixtureModel) extends MLWriter {
private case class Data(weights: Array[Double], mus: Array[OldVector], sigmas: Array[OldMatrix])
override protected def saveImpl(path: String): Unit = {
// Save metadata and Params
DefaultParamsWriter.saveMetadata(instance, path, sc)
// Save model data: weights and gaussians
val weights = instance.weights
val gaussians = instance.gaussians
val mus = gaussians.map(g => OldVectors.fromML(g.mean))
val sigmas = gaussians.map(c => OldMatrices.fromML(c.cov))
val data = Data(weights, mus, sigmas)
val dataPath = new Path(path, "data").toString
sparkSession.createDataFrame(Seq(data)).repartition(1).write.parquet(dataPath)
}
}
private class GaussianMixtureModelReader extends MLReader[GaussianMixtureModel] {
/** Checked against metadata when loading model */
private val className = classOf[GaussianMixtureModel].getName
override def load(path: String): GaussianMixtureModel = {
val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
val dataPath = new Path(path, "data").toString
val row = sparkSession.read.parquet(dataPath).select("weights", "mus", "sigmas").head()
val weights = row.getSeq[Double](0).toArray
val mus = row.getSeq[OldVector](1).toArray
val sigmas = row.getSeq[OldMatrix](2).toArray
require(mus.length == sigmas.length, "Length of Mu and Sigma array must match")
require(mus.length == weights.length, "Length of weight and Gaussian array must match")
val gaussians = mus.zip(sigmas).map {
case (mu, sigma) =>
new MultivariateGaussian(mu.asML, sigma.asML)
}
val model = new GaussianMixtureModel(metadata.uid, weights, gaussians)
metadata.getAndSetParams(model)
model
}
}
/**
* Compute the probability (partial assignment) for each cluster for the given data point.
*
* @param features Data point
* @param dists Gaussians for model
* @param weights Weights for each Gaussian
* @return Probability (partial assignment) for each of the k clusters
*/
private[clustering]
def computeProbabilities(
features: BDV[Double],
dists: Array[MultivariateGaussian],
weights: Array[Double]): Array[Double] = {
val p = weights.zip(dists).map {
case (weight, dist) => EPSILON + weight * dist.pdf(features)
}
val pSum = p.sum
var i = 0
while (i < weights.length) {
p(i) /= pSum
i += 1
}
p
}
}
/**
* Gaussian Mixture clustering.
*
* This class performs expectation maximization for multivariate Gaussian
* Mixture Models (GMMs). A GMM represents a composite distribution of
* independent Gaussian distributions with associated "mixing" weights
* specifying each's contribution to the composite.
*
* Given a set of sample points, this class will maximize the log-likelihood
* for a mixture of k Gaussians, iterating until the log-likelihood changes by
* less than convergenceTol, or until it has reached the max number of iterations.
* While this process is generally guaranteed to converge, it is not guaranteed
* to find a global optimum.
*
* @note This algorithm is limited in its number of features since it requires storing a covariance
* matrix which has size quadratic in the number of features. Even when the number of features does
* not exceed this limit, this algorithm may perform poorly on high-dimensional data.
* This is due to high-dimensional data (a) making it difficult to cluster at all (based
* on statistical/theoretical arguments) and (b) numerical issues with Gaussian distributions.
*/
@Since("2.0.0")
class GaussianMixture @Since("2.0.0") (
@Since("2.0.0") override val uid: String)
extends Estimator[GaussianMixtureModel] with GaussianMixtureParams with DefaultParamsWritable {
setDefault(
k -> 2,
maxIter -> 100,
tol -> 0.01)
@Since("2.0.0")
override def copy(extra: ParamMap): GaussianMixture = defaultCopy(extra)
@Since("2.0.0")
def this() = this(Identifiable.randomUID("GaussianMixture"))
/** @group setParam */
@Since("2.0.0")
def setFeaturesCol(value: String): this.type = set(featuresCol, value)
/** @group setParam */
@Since("2.0.0")
def setPredictionCol(value: String): this.type = set(predictionCol, value)
/** @group setParam */
@Since("2.0.0")
def setProbabilityCol(value: String): this.type = set(probabilityCol, value)
/** @group setParam */
@Since("2.0.0")
def setK(value: Int): this.type = set(k, value)
/** @group setParam */
@Since("2.0.0")
def setMaxIter(value: Int): this.type = set(maxIter, value)
/** @group setParam */
@Since("2.0.0")
def setTol(value: Double): this.type = set(tol, value)
/** @group setParam */
@Since("2.0.0")
def setSeed(value: Long): this.type = set(seed, value)
/**
* Number of samples per cluster to use when initializing Gaussians.
*/
private val numSamples = 5
@Since("2.0.0")
override def fit(dataset: Dataset[_]): GaussianMixtureModel = instrumented { instr =>
transformSchema(dataset.schema, logging = true)
val sc = dataset.sparkSession.sparkContext
val numClusters = $(k)
val instances = dataset
.select(DatasetUtils.columnToVector(dataset, getFeaturesCol)).rdd.map {
case Row(features: Vector) => features
}.cache()
// Extract the number of features.
val numFeatures = instances.first().size
require(numFeatures < GaussianMixture.MAX_NUM_FEATURES, s"GaussianMixture cannot handle more " +
s"than ${GaussianMixture.MAX_NUM_FEATURES} features because the size of the covariance" +
s" matrix is quadratic in the number of features.")
instr.logPipelineStage(this)
instr.logDataset(dataset)
instr.logParams(this, featuresCol, predictionCol, probabilityCol, k, maxIter, seed, tol)
instr.logNumFeatures(numFeatures)
val shouldDistributeGaussians = GaussianMixture.shouldDistributeGaussians(
numClusters, numFeatures)
// TODO: SPARK-15785 Support users supplied initial GMM.
val (weights, gaussians) = initRandom(instances, numClusters, numFeatures)
var logLikelihood = Double.MinValue
var logLikelihoodPrev = 0.0
var iter = 0
while (iter < $(maxIter) && math.abs(logLikelihood - logLikelihoodPrev) > $(tol)) {
val bcWeights = instances.sparkContext.broadcast(weights)
val bcGaussians = instances.sparkContext.broadcast(gaussians)
// aggregate the cluster contribution for all sample points
val sums = instances.treeAggregate(
new ExpectationAggregator(numFeatures, bcWeights, bcGaussians))(
seqOp = (c, v) => (c, v) match {
case (aggregator, instance) => aggregator.add(instance)
},
combOp = (c1, c2) => (c1, c2) match {
case (aggregator1, aggregator2) => aggregator1.merge(aggregator2)
})
bcWeights.destroy(blocking = false)
bcGaussians.destroy(blocking = false)
if (iter == 0) {
val numSamples = sums.count
instr.logNumExamples(numSamples)
}
/*
Create new distributions based on the partial assignments
(often referred to as the "M" step in literature)
*/
val sumWeights = sums.weights.sum
if (shouldDistributeGaussians) {
val numPartitions = math.min(numClusters, 1024)
val tuples = Seq.tabulate(numClusters) { i =>
(sums.means(i), sums.covs(i), sums.weights(i))
}
val (ws, gs) = sc.parallelize(tuples, numPartitions).map { case (mean, cov, weight) =>
GaussianMixture.updateWeightsAndGaussians(mean, cov, weight, sumWeights)
}.collect().unzip
Array.copy(ws, 0, weights, 0, ws.length)
Array.copy(gs, 0, gaussians, 0, gs.length)
} else {
var i = 0
while (i < numClusters) {
val (weight, gaussian) = GaussianMixture.updateWeightsAndGaussians(
sums.means(i), sums.covs(i), sums.weights(i), sumWeights)
weights(i) = weight
gaussians(i) = gaussian
i += 1
}
}
logLikelihoodPrev = logLikelihood // current becomes previous
logLikelihood = sums.logLikelihood // this is the freshly computed log-likelihood
iter += 1
}
instances.unpersist(false)
val gaussianDists = gaussians.map { case (mean, covVec) =>
val cov = GaussianMixture.unpackUpperTriangularMatrix(numFeatures, covVec.values)
new MultivariateGaussian(mean, cov)
}
val model = copyValues(new GaussianMixtureModel(uid, weights, gaussianDists)).setParent(this)
val summary = new GaussianMixtureSummary(model.transform(dataset),
$(predictionCol), $(probabilityCol), $(featuresCol), $(k), logLikelihood, iter)
instr.logNamedValue("logLikelihood", logLikelihood)
instr.logNamedValue("clusterSizes", summary.clusterSizes)
model.setSummary(Some(summary))
}
@Since("2.0.0")
override def transformSchema(schema: StructType): StructType = {
validateAndTransformSchema(schema)
}
/**
* Initialize weights and corresponding gaussian distributions at random.
*
* We start with uniform weights, a random mean from the data, and diagonal covariance matrices
* using component variances derived from the samples.
*
* @param instances The training instances.
* @param numClusters The number of clusters.
* @param numFeatures The number of features of training instance.
* @return The initialized weights and corresponding gaussian distributions. Note the
* covariance matrix of multivariate gaussian distribution is symmetric and
* we only save the upper triangular part as a dense vector (column major).
*/
private def initRandom(
instances: RDD[Vector],
numClusters: Int,
numFeatures: Int): (Array[Double], Array[(DenseVector, DenseVector)]) = {
val samples = instances.takeSample(withReplacement = true, numClusters * numSamples, $(seed))
val weights: Array[Double] = Array.fill(numClusters)(1.0 / numClusters)
val gaussians: Array[(DenseVector, DenseVector)] = Array.tabulate(numClusters) { i =>
val slice = samples.view(i * numSamples, (i + 1) * numSamples)
val mean = {
val v = new DenseVector(new Array[Double](numFeatures))
var i = 0
while (i < numSamples) {
BLAS.axpy(1.0, slice(i), v)
i += 1
}
BLAS.scal(1.0 / numSamples, v)
v
}
/*
Construct matrix where diagonal entries are element-wise
variance of input vectors (computes biased variance).
Since the covariance matrix of multivariate gaussian distribution is symmetric,
only the upper triangular part of the matrix (column major) will be saved as
a dense vector in order to reduce the shuffled data size.
*/
val cov = {
val ss = new DenseVector(new Array[Double](numFeatures)).asBreeze
slice.foreach(xi => ss += (xi.asBreeze - mean.asBreeze) ^:^ 2.0)
val diagVec = Vectors.fromBreeze(ss)
BLAS.scal(1.0 / numSamples, diagVec)
val covVec = new DenseVector(Array.fill[Double](
numFeatures * (numFeatures + 1) / 2)(0.0))
diagVec.toArray.zipWithIndex.foreach { case (v: Double, i: Int) =>
covVec.values(i + i * (i + 1) / 2) = v
}
covVec
}
(mean, cov)
}
(weights, gaussians)
}
}
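// Usage sketch, assuming an active SparkSession `spark` and the estimator's default
// "features" column (both assumptions are for illustration only):
//
//   val dataset = spark.createDataFrame(Seq(
//     Tuple1(Vectors.dense(0.0, 0.1)),
//     Tuple1(Vectors.dense(5.0, 4.9))
//   )).toDF("features")
//   val gmm = new GaussianMixture().setK(2).setMaxIter(50).setSeed(1L)
//   val model = gmm.fit(dataset)
//   model.gaussiansDF.show()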
@Since("2.0.0")
object GaussianMixture extends DefaultParamsReadable[GaussianMixture] {
/** Limit number of features such that numFeatures^2^ < Int.MaxValue */
private[clustering] val MAX_NUM_FEATURES = math.sqrt(Int.MaxValue).toInt
@Since("2.0.0")
override def load(path: String): GaussianMixture = super.load(path)
/**
* Heuristic to distribute the computation of the [[MultivariateGaussian]]s, approximately when
* numFeatures > 25 except for when numClusters is very small.
*
* @param numClusters Number of clusters
* @param numFeatures Number of features
*/
private[clustering] def shouldDistributeGaussians(
numClusters: Int,
numFeatures: Int): Boolean = {
((numClusters - 1.0) / numClusters) * numFeatures > 25.0
}
/**
* Convert an n * (n + 1) / 2 dimension array representing the upper triangular part of a matrix
* into an n * n array representing the full symmetric matrix (column major).
*
* @param n The order of the n by n matrix.
* @param triangularValues The upper triangular part of the matrix packed in an array
* (column major).
* @return A dense matrix which represents the symmetric matrix in column major.
*/
private[clustering] def unpackUpperTriangularMatrix(
n: Int,
triangularValues: Array[Double]): DenseMatrix = {
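    // Example (n = 3): the packed values [m00, m01, m11, m02, m12, m22] expand to
    //   [ m00 m01 m02 ]
    //   [ m01 m11 m12 ]
    //   [ m02 m12 m22 ]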
val symmetricValues = new Array[Double](n * n)
var r = 0
var i = 0
while (i < n) {
var j = 0
while (j <= i) {
symmetricValues(i * n + j) = triangularValues(r)
symmetricValues(j * n + i) = triangularValues(r)
r += 1
j += 1
}
i += 1
}
new DenseMatrix(n, n, symmetricValues)
}
/**
* Update the weight, mean and covariance of gaussian distribution.
*
* @param mean The mean of the gaussian distribution.
* @param cov The covariance matrix of the gaussian distribution. Note we only
* save the upper triangular part as a dense vector (column major).
* @param weight The weight of the gaussian distribution.
* @param sumWeights The sum of weights of all clusters.
* @return The updated weight, mean and covariance.
*/
private[clustering] def updateWeightsAndGaussians(
mean: DenseVector,
cov: DenseVector,
weight: Double,
sumWeights: Double): (Double, (DenseVector, DenseVector)) = {
BLAS.scal(1.0 / weight, mean)
BLAS.spr(-weight, mean, cov)
BLAS.scal(1.0 / weight, cov)
val newWeight = weight / sumWeights
val newGaussian = (mean, cov)
(newWeight, newGaussian)
}
}
/**
* ExpectationAggregator computes the partial expectation results.
*
* @param numFeatures The number of features.
* @param bcWeights The broadcast weights for each Gaussian distribution in the mixture.
* @param bcGaussians The broadcast array of Multivariate Gaussian (Normal) Distribution
* in the mixture. Note only upper triangular part of the covariance
* matrix of each distribution is stored as dense vector (column major)
* in order to reduce shuffled data size.
*/
private class ExpectationAggregator(
numFeatures: Int,
bcWeights: Broadcast[Array[Double]],
bcGaussians: Broadcast[Array[(DenseVector, DenseVector)]]) extends Serializable {
private val k: Int = bcWeights.value.length
private var totalCnt: Long = 0L
private var newLogLikelihood: Double = 0.0
private lazy val newWeights: Array[Double] = new Array[Double](k)
private lazy val newMeans: Array[DenseVector] = Array.fill(k)(
new DenseVector(Array.fill[Double](numFeatures)(0.0)))
private lazy val newCovs: Array[DenseVector] = Array.fill(k)(
new DenseVector(Array.fill[Double](numFeatures * (numFeatures + 1) / 2)(0.0)))
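  // Lazily rebuild full MultivariateGaussian instances on each executor from the
  // broadcast (mean, packed upper-triangular covariance) pairs.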
@transient private lazy val oldGaussians = {
bcGaussians.value.map { case (mean, covVec) =>
val cov = GaussianMixture.unpackUpperTriangularMatrix(numFeatures, covVec.values)
new MultivariateGaussian(mean, cov)
}
}
def count: Long = totalCnt
def logLikelihood: Double = newLogLikelihood
def weights: Array[Double] = newWeights
def means: Array[DenseVector] = newMeans
def covs: Array[DenseVector] = newCovs
/**
* Add a new training instance to this ExpectationAggregator, update the weights,
* means and covariances for each distributions, and update the log likelihood.
*
* @param instance The instance of data point to be added.
* @return This ExpectationAggregator object.
*/
def add(instance: Vector): this.type = {
val localWeights = bcWeights.value
val localOldGaussians = oldGaussians
val prob = new Array[Double](k)
var probSum = 0.0
var i = 0
while (i < k) {
val p = EPSILON + localWeights(i) * localOldGaussians(i).pdf(instance)
prob(i) = p
probSum += p
i += 1
}
newLogLikelihood += math.log(probSum)
val localNewWeights = newWeights
val localNewMeans = newMeans
val localNewCovs = newCovs
i = 0
while (i < k) {
prob(i) /= probSum
localNewWeights(i) += prob(i)
BLAS.axpy(prob(i), instance, localNewMeans(i))
BLAS.spr(prob(i), instance, localNewCovs(i))
i += 1
}
totalCnt += 1
this
}
/**
* Merge another ExpectationAggregator, update the weights, means and covariances
* for each distributions, and update the log likelihood.
* (Note that it's in place merging; as a result, `this` object will be modified.)
*
* @param other The other ExpectationAggregator to be merged.
* @return This ExpectationAggregator object.
*/
def merge(other: ExpectationAggregator): this.type = {
if (other.count != 0) {
totalCnt += other.totalCnt
val localThisNewWeights = this.newWeights
val localOtherNewWeights = other.newWeights
val localThisNewMeans = this.newMeans
val localOtherNewMeans = other.newMeans
val localThisNewCovs = this.newCovs
val localOtherNewCovs = other.newCovs
var i = 0
while (i < k) {
localThisNewWeights(i) += localOtherNewWeights(i)
BLAS.axpy(1.0, localOtherNewMeans(i), localThisNewMeans(i))
BLAS.axpy(1.0, localOtherNewCovs(i), localThisNewCovs(i))
i += 1
}
newLogLikelihood += other.newLogLikelihood
}
this
}
}
/**
* :: Experimental ::
* Summary of GaussianMixture.
*
* @param predictions `DataFrame` produced by `GaussianMixtureModel.transform()`.
* @param predictionCol Name for column of predicted clusters in `predictions`.
* @param probabilityCol Name for column of predicted probability of each cluster
* in `predictions`.
* @param featuresCol Name for column of features in `predictions`.
* @param k Number of clusters.
* @param logLikelihood Total log-likelihood for this model on the given data.
* @param numIter Number of iterations.
*/
@Since("2.0.0")
@Experimental
class GaussianMixtureSummary private[clustering] (
predictions: DataFrame,
predictionCol: String,
@Since("2.0.0") val probabilityCol: String,
featuresCol: String,
k: Int,
@Since("2.2.0") val logLikelihood: Double,
numIter: Int)
extends ClusteringSummary(predictions, predictionCol, featuresCol, k, numIter) {
/**
* Probability of each cluster.
*/
@Since("2.0.0")
@transient lazy val probability: DataFrame = predictions.select(probabilityCol)
}
| michalsenkyr/spark | mllib/src/main/scala/org/apache/spark/ml/clustering/GaussianMixture.scala | Scala | apache-2.0 | 26,178 |
package com.insweat.hssd.lib.essence
trait Interpreted {
def interpOut(ctx: Any, intVal: Any): Either[String, Any]
def interpIn(ctx: Any, extVal: Any): Either[String, Any]
}
| insweat/hssd | com.insweat.hssd.lib/src/com/insweat/hssd/lib/essence/Interpreted.scala | Scala | lgpl-3.0 | 183 |
package wvlet.airframe.http.okhttp
import java.nio.charset.StandardCharsets
import okhttp3.internal.http.HttpMethod
import okhttp3.{Protocol, Request, RequestBody, Response, ResponseBody}
import wvlet.airframe.http.{HttpMultiMap, HttpStatus}
import wvlet.airspec.AirSpec
class OkHttpTest extends AirSpec {
test("provide facade of http requests") {
val body = RequestBody.create(ContentTypeJson, "hello okhttp")
Seq(
new Request.Builder().get(),
new Request.Builder().post(body),
new Request.Builder().delete(body),
new Request.Builder().put(body),
new Request.Builder().patch(body),
new Request.Builder().head(),
new Request.Builder().method("OPTIONS", body),
new Request.Builder().method("TRACE", body)
).foreach { builder =>
val req = builder.url("http://localhost/hello").build()
val r = req.toHttpRequest
r.method shouldBe toHttpMethod(req.method())
r.path shouldBe "/hello"
r.query shouldBe HttpMultiMap.empty
if (HttpMethod.permitsRequestBody(req.method())) {
r.contentString shouldBe "hello okhttp"
r.contentBytes shouldBe "hello okhttp".getBytes(StandardCharsets.UTF_8)
r.contentType shouldBe Some("application/json;charset=utf-8")
} else {
r.contentString shouldBe ""
r.contentBytes shouldBe Array.empty[Byte]
r.contentType shouldBe empty
}
req.toRaw shouldBeTheSameInstanceAs req
}
}
test("provide facade of http responses") {
val res = new Response.Builder()
.code(403)
.body(ResponseBody.create(ContentTypeJson, "hello world"))
.request(new Request.Builder().url("http://localhost/").get().build())
.protocol(Protocol.HTTP_1_1)
.message("message")
.build()
val r = res.toHttpResponse
r.status shouldBe HttpStatus.Forbidden_403
r.statusCode shouldBe 403
r.contentType shouldBe Some("application/json;charset=utf-8")
r.contentBytes shouldBe "hello world".getBytes(StandardCharsets.UTF_8)
res.toRaw shouldBeTheSameInstanceAs res
}
}
| wvlet/airframe | airframe-http-okhttp/src/test/scala/wvlet/airframe/http/okhttp/OkHttpTest.scala | Scala | apache-2.0 | 2,089 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import uk.gov.hmrc.ct.accounts.frs102.retriever.{AbridgedAccountsBoxRetriever, Frs102AccountsBoxRetriever, FullAccountsBoxRetriever}
import uk.gov.hmrc.ct.box.ValidatableBox._
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.box.retriever.BoxRetriever._
case class AC5058A(value: Option[String]) extends CtBoxIdentifier(name = "Balance sheet - Creditors within 1 year note.") with CtOptionalString with Input with SelfValidatableBox[Frs102AccountsBoxRetriever, Option[String]] {
override def validate(boxRetriever: Frs102AccountsBoxRetriever): Set[CtValidation] = {
import boxRetriever._
val isMandatory = anyHaveValue(ac58(), ac59())
collectErrors (
failIf(!isMandatory)(
validateCannotExist(boxRetriever)
),
failIf(isMandatory)(
validateNoteIsMandatory(boxRetriever)
),
validateStringMaxLength(value.getOrElse(""), StandardCohoTextFieldLimit),
validateCoHoStringReturnIllegalChars()
)
}
private def validateCannotExist(boxRetriever: Frs102AccountsBoxRetriever)(): Set[CtValidation] = {
boxRetriever match {
case x: AbridgedAccountsBoxRetriever => failIf(hasValue)(Set(CtValidation(None, "error.balanceSheet.creditorsWithinOneYear.cannotExist")))
case x: FullAccountsBoxRetriever => failIf(fullNoteHasValue(x))(Set(CtValidation(None, "error.balanceSheet.creditorsWithinOneYear.cannotExist")))
case _ => Set.empty
}
}
private def validateNoteIsMandatory(boxRetriever: Frs102AccountsBoxRetriever)(): Set[CtValidation] = {
boxRetriever match {
case x: FullAccountsBoxRetriever =>
failIf(!fullNoteHasValue(x))(Set(CtValidation(None, "error.creditors.within.one.year.note.one.box.required")))
case _ => Set.empty
}
}
private def fullNoteHasValue(boxRetriever: FullAccountsBoxRetriever): Boolean = {
import boxRetriever._
anyHaveValue(
ac142(), ac143(),
ac144(), ac145(),
ac146(), ac147(),
ac148(), ac149(),
ac150(), ac151(),
ac152(), ac153(),
this
)
}
}
| pncampbell/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC5058A.scala | Scala | apache-2.0 | 2,702 |
/*
* Copyright 2014 JHC Systems Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sqlest.ast
case class Union[A](select: Select[A, _ <: Relation], unionAll: Boolean)
| DavidGregory084/sqlest | sqlest/src/main/scala/sqlest/ast/Union.scala | Scala | apache-2.0 | 697 |
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.jobs.index
import com.twitter.scalding._
import org.apache.accumulo.core.data.{Mutation, Range => AcRange}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.Text
import org.geotools.data.DataStoreFinder
import org.locationtech.geomesa.accumulo.data.AccumuloDataStoreFactory.params._
import org.locationtech.geomesa.accumulo.data.AccumuloFeatureWriter.FeatureToWrite
import org.locationtech.geomesa.accumulo.data._
import org.locationtech.geomesa.accumulo.data.tables.SpatioTemporalTable
import org.locationtech.geomesa.accumulo.index._
import org.locationtech.geomesa.features.{SimpleFeatureDeserializers, SimpleFeatureSerializers}
import org.locationtech.geomesa.jobs.GeoMesaBaseJob
import org.locationtech.geomesa.jobs.scalding.ConnectionParams._
import org.locationtech.geomesa.jobs.scalding._
import scala.collection.JavaConverters._
class SortedIndexUpdateJob(args: Args) extends GeoMesaBaseJob(args) {
val UPDATE_TO_VERSION = 2 // we want to maintain compatibility with any attribute indices written
val feature = args(FEATURE_IN)
val dsParams = toDataStoreInParams(args)
// non-serializable resources - need to be lazy and transient so they are available to each mapper
@transient lazy val ds = DataStoreFinder.getDataStore(dsParams.asJava).asInstanceOf[AccumuloDataStore]
@transient lazy val sft = ds.getSchema(feature)
@transient lazy val indexSchemaFmt = ds.buildDefaultSpatioTemporalSchema(sft.getTypeName)
@transient lazy val encoding = ds.getFeatureEncoding(sft)
@transient lazy val featureEncoder = SimpleFeatureSerializers(sft, encoding)
// this won't use the new schema version, but anything less than version 4 is handled the same way
@transient lazy val indexValueEncoder = IndexValueEncoder(sft)
@transient lazy val encoder = IndexSchema.buildKeyEncoder(sft, indexSchemaFmt)
@transient lazy val decoder = SimpleFeatureDeserializers(sft, encoding)
val (input, output) = {
val indexSchemaFmt = ds.getIndexSchemaFmt(sft.getTypeName)
val encoder = IndexSchema.buildKeyEncoder(sft, indexSchemaFmt)
val maxShard = IndexSchema.maxShard(indexSchemaFmt)
val prefixes = (0 to maxShard).map { i =>
encoder.rowf match { case CompositeTextFormatter(formatters, sep) =>
formatters.take(2).map {
case f: PartitionTextFormatter => f.fmt(i)
case f: ConstantTextFormatter => f.constStr
}.mkString("", sep, sep)
}
}
val ranges = SerializedRange(prefixes.map(p => new AcRange(p, p + "~")))
val stTable = ds.getTableName(feature, SpatioTemporalTable)
val instance = dsParams(instanceIdParam.getName)
val zoos = dsParams(zookeepersParam.getName)
val user = dsParams(userParam.getName)
val pwd = dsParams(passwordParam.getName)
val input = AccumuloInputOptions(instance, zoos, user, pwd, stTable, ranges)
val output = AccumuloOutputOptions(instance, zoos, user, pwd, stTable)
(input, output)
}
// validation
assert(sft != null, s"The feature '$feature' does not exist in the input data store")
// scalding job
TypedPipe.from(AccumuloSource(input))
.flatMap { case (key, value) =>
if (SpatioTemporalTable.isIndexEntry(key) || SpatioTemporalTable.isDataEntry(key)) {
// already up-to-date
Seq.empty
} else {
val visibility = key.getColumnVisibilityParsed
val delete = new Mutation(key.getRow)
delete.putDelete(key.getColumnFamily, key.getColumnQualifier, visibility)
val mutations = if (key.getColumnQualifier.toString == "SimpleFeatureAttribute") {
// data entry, re-calculate the keys for index and data entries
val sf = decoder.deserialize(value.get())
val toWrite = new FeatureToWrite(sf, key.getColumnVisibility.toString, featureEncoder, indexValueEncoder)
encoder.encode(toWrite)
} else {
// index entry, ignore it (will be handled by associated data entry)
Seq.empty
}
(Seq(delete) ++ mutations).map((null: Text, _))
}
}.write(AccumuloSource(output))
override def afterJobTasks() = {
// schedule a table compaction to remove the deleted entries
val ds = DataStoreFinder.getDataStore(dsParams.asJava).asInstanceOf[AccumuloDataStore]
ds.connector.tableOperations().compact(output.table, null, null, true, false)
ds.setIndexSchemaFmt(feature, ds.buildDefaultSpatioTemporalSchema(feature))
ds.setGeomesaVersion(feature, UPDATE_TO_VERSION)
}
}
object SortedIndexUpdateJob {
def runJob(conf: Configuration, params: Map[String, String], feature: String) = {
val args = toInArgs(params) ++ Seq(FEATURE_IN -> List(feature)).toMap
val instantiateJob = (args: Args) => new SortedIndexUpdateJob(args)
GeoMesaBaseJob.runJob(conf, args, instantiateJob)
}
}
| drackaer/geomesa | geomesa-jobs/src/main/scala/org/locationtech/geomesa/jobs/index/SortedIndexUpdateJob.scala | Scala | apache-2.0 | 5,311 |
import com.thesamet.proto.e2e.custom_types._
import com.thesamet.proto.e2e.custom_types.CustomMessage.Weather
import com.thesamet.proto.e2e.CustomTypes.{CustomMessage => CustomMessageJava}
import com.thesamet.proto.e2e.CustomTypes.CustomMessage.{Weather => WeatherJava}
import org.scalatest._
import com.thesamet.pb._
class CustomTypesSpec extends FlatSpec with MustMatchers {
"CustomMessage" should "serialize and parse" in {
val message = CustomMessage(
personId = Some(PersonId("abcd")),
requiredPersonId = PersonId("required"),
personIds = Seq(PersonId("p1"), PersonId("p2")),
age = Some(Years(27)),
requiredAge = Years(25),
ages = Seq(Years(3), Years(8), Years(35)),
name = Some(FullName(firstName = "Foo", lastName = "Bar")),
requiredName = FullName(firstName = "Owen", lastName = "Money"),
names = Seq(
FullName(firstName = "Foo", lastName = "Bar"),
FullName(firstName = "V1", lastName = "Z2")),
weather = Some(WrappedWeather(Weather.RAIN)),
requiredWeather = WrappedWeather(Weather.SUNNY),
weathers = Seq(WrappedWeather(Weather.RAIN), WrappedWeather(Weather.SUNNY)),
packedWeathers = Seq(WrappedWeather(Weather.RAIN), WrappedWeather(Weather.RAIN))
)
message.getPersonId must be(PersonId("abcd"))
message.requiredPersonId must be(PersonId("required"))
message.personIds must be(Seq(PersonId("p1"), PersonId("p2")))
message.getAge must be(Years(27))
message.requiredAge must be(Years(25))
message.ages must be(Seq(Years(3), Years(8), Years(35)))
message.getName must be(FullName("Foo", "Bar"))
CustomMessage.parseFrom(message.toByteArray) must be(message)
CustomMessage.toJavaProto(message).getPersonId must be("abcd")
CustomMessage.toJavaProto(message).getRequiredPersonId must be("required")
CustomMessage.toJavaProto(message).getAge must be(27)
CustomMessage.toJavaProto(message).getRequiredAge must be(25)
CustomMessage.toJavaProto(message).getName.getFirst must be("Foo")
CustomMessage.toJavaProto(message).getName.getLast must be("Bar")
}
"Custom message types" should "concatenate correctly" in {
val m1 = {
val b = CustomMessageJava.newBuilder
b.getNameBuilder.setFirst("Foo")
b.setRequiredPersonId("p1")
b.getRequiredNameBuilder.setFirst("first_req")
b.setAge(4)
b.setRequiredAge(1)
b.setRequiredWeather(WeatherJava.SUNNY)
b.addPackedWeathers(WeatherJava.SUNNY)
b.addPackedWeathers(WeatherJava.RAIN)
b.build
}
val m2 = {
val b = CustomMessageJava.newBuilder
b.getNameBuilder.setLast("Bar")
b.setRequiredPersonId("p2")
b.getRequiredNameBuilder.setLast("last_req")
b.setAge(5)
b.setRequiredAge(2)
b.setRequiredWeather(WeatherJava.RAIN)
b.addPackedWeathers(WeatherJava.RAIN)
b.addPackedWeathers(WeatherJava.SUNNY)
b.build
}
val expected = CustomMessage(
requiredPersonId = PersonId("p2"),
requiredAge = Years(2),
requiredName = FullName("first_req", "last_req"),
requiredWeather = WrappedWeather(Weather.RAIN),
packedWeathers = Seq(
WrappedWeather(Weather.SUNNY),
WrappedWeather(Weather.RAIN),
WrappedWeather(Weather.RAIN),
WrappedWeather(Weather.SUNNY)
)
)
.update(
_.name := FullName("Foo", "Bar"),
_.age := Years(5)
)
val concat = (m1.toByteArray ++ m2.toByteArray)
CustomMessage.parseFrom(concat) must be(expected)
}
"Extended types" should "inherit from marker type" in {
val t: DomainEvent = CustomerEvent(
personId = Some(PersonId("123")),
optionalNumber = Some(1),
repeatedNumber = Seq(2,3,4),
requiredNumber = 5)
t mustBe a [DomainEvent]
t.personId must be(Some(PersonId("123")))
t.optionalNumber must be(Some(1))
t.repeatedNumber must be(Seq(2,3,4))
t.requiredNumber must be(5)
}
"Extended companion objects" should "inherit from marker type" in {
CustomerEvent mustBe a [DomainEventCompanion]
CustomerEvent.thisIs must be("The companion object")
}
}
| dotty-staging/ScalaPB | e2e/src/test/scala/CustomTypesSpec.scala | Scala | apache-2.0 | 4,156 |
package edu.chop.cbmi.dataExpress.test.dataModels
import org.scalatest.{BeforeAndAfter, GivenWhenThen, FunSpec}
import org.scalatest.matchers.ShouldMatchers
import edu.chop.cbmi.dataExpress.backends.file._
import edu.chop.cbmi.dataExpress.dataModels.{DataRow, DataTable}
import java.io.File
import edu.chop.cbmi.dataExpress.dataModels.file.FileTable
import edu.chop.cbmi.dataExpress.backends.file.TextFileBackend
import edu.chop.cbmi.dataExpress.dataModels.SeqColumnNames
import edu.chop.cbmi.dataExpress.backends.file.DelimiterMarshaller
import edu.chop.cbmi.dataExpress.backends.file.HeaderRowColumnNames
import edu.chop.cbmi.dataExpress.dataModels.RichOption._
/**
* Created with IntelliJ IDEA.
* User: masinoa
* Date: 5/10/13
* Time: 10:16 AM
*/
class FileTableSpec extends FunSpec with GivenWhenThen with ShouldMatchers with BeforeAndAfter{
lazy val file = new File("./output/FileTableSpec.dat")
before{
if(file.exists())file.delete()
//create a file to read
f.backend.makeNewFile()
f.backend.write(DataRow(f.colNames.zip(f.colNames): _*), Overwrite)
f.backend.write(f.rows.iterator, Append)
}
after{
if(file.exists())file.delete()
}
def fixture() = {
new {
val colNames = Seq("Name","ID","Known")
val content = List("Bob,249,true","Jane Doe,3430,false","Mike R.,,false","Steve,83839,")
val cng = HeaderRowColumnNames(file,",")
val marshaller = DelimiterCustomMarshaller(",", cng, (a:Array[Option[String]])=>{
val name = a(0) match{
case Some(s) => s.toString
case _ => null
}
val id = a(1) match{
case Some(s) => s.toInt
case _ => null
}
val known = a(2) match{
case Some(s) => s.toBoolean
case _ => null
}
Array(name, id, known)
})
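      // Descriptive note: the custom marshaller above maps the three delimited string
      // fields to a String name, an Int id and a Boolean flag, leaving null where a
      // field is missing in the line.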
val backend = TextFileBackend(file, marshaller, 1)
val rows = {
val cg = SeqColumnNames(colNames)
val mars = DelimiterMarshaller(",",cg)
content.map{line => mars.unmarshall(line)}
}
}
}
val f = fixture()
describe("A File Table"){
it("should result from a call to DataTable"){
Given("A file backend and a columnanme generator")
val ft:FileTable = DataTable(f.backend, f.cng)
And("the table should have column names from the header")
ft.column_names should equal(f.colNames)
And("the table should be an iterator over DataRow that match the content")
(0 /: ft){(idx, row) =>
val lineVals = f.content(idx).split(",")
row.Name.as[String] match{
case Some(s) => lineVals(0).trim should equal(s)
case _ => lineVals(0).trim should equal("")
}
row.ID.as[Int] match{
case Some(i) => lineVals(1).trim.toInt should equal(i)
case _ => lineVals(1).trim should equal("")
}
row.Known.as[Boolean] match{
case Some(b) => lineVals(2).toBoolean should equal(b)
case _ => lineVals.length should be <(3)
}
idx + 1
}
And("it should allow column access")
When("accessed using the col it will return a FileColumn[Option[_]]")
(0/: ft.col("ID")){(idx,id) =>
val lineVals = f.content(idx).split(",")
id.as[Int] match{
case Some(ids) => lineVals(1).trim.toInt should equal(ids)
case _ => lineVals(1).trim should equal("")
}
idx + 1
}
When("accessed using the col_as[G] return a FileColumn[Option[G]] IF THE MARSHALLER SUPPORTS IT")
(0/: ft.col_as[Int]("ID")){(idx,id) =>
val lineVals = f.content(idx).split(",")
id match{
case Some(v) => lineVals(1).trim.toInt should equal(v)
case _ => lineVals(1).trim should equal("")
}
idx + 1
}
When("accessed using the col_asu[G] return a FileColumn[G] IF THE MARSHALLER SUPPORTS IT AND THE COLUMN HAS NO EMPTY ROWS")
(0/: ft.col_asu[String]("Name")){(idx,name) =>
val lineVals = f.content(idx).split(",")
name should equal(lineVals(0))
idx + 1
}
}
}
}
| chop-dbhi/dataexpress | src/test/scala/edu/chop/cbmi/dataExpress/test/dataModels/FileTableSpec.scala | Scala | bsd-2-clause | 4,177 |
package streams
import common._
/**
* This component implements a parser to define terrains from a
* graphical ASCII representation.
*
* When mixing in that component, a level can be defined by
* defining the field `level` in the following form:
*
* val level =
* """------
* |--ST--
* |--oo--
* |--oo--
* |------""".stripMargin
*
* - The `-` character denotes parts which are outside the terrain
* - `o` denotes fields which are part of the terrain
* - `S` denotes the start position of the block (which is also considered
* inside the terrain)
* - `T` denotes the final position of the block (which is also considered
* inside the terrain)
*
* In this example, the first and last lines could be omitted, and
* also the columns that consist of `-` characters only.
*/
trait StringParserTerrain extends GameDef {
/**
   * An ASCII representation of the terrain. This field should remain
* abstract here.
*/
val level: String
/**
   * This method returns the terrain function that represents the terrain
   * in `levelVector`. The vector contains the parsed version of the `level`
   * string. For example, the following level
*
* val level =
* """ST
* |oo
* |oo""".stripMargin
*
* is represented as
*
* Vector(Vector('S', 'T'), Vector('o', 'o'), Vector('o', 'o'))
*
* The resulting function should return `true` if the position `pos` is
* a valid position (not a '-' character) inside the terrain described
* by `levelVector`.
*/
def terrainFunction(levelVector: Vector[Vector[Char]]): Pos => Boolean = {
(pos: Pos) =>{
def validRow = 0 <= pos.row && pos.row < levelVector.length
def validCol = 0 <= pos.col && pos.col < levelVector(0).length
validCol && validRow && levelVector(pos.row)(pos.col) != '-'
}
}
/**
* This function should return the position of character `c` in the
   * terrain described by `levelVector`. You can assume that `c`
* appears exactly once in the terrain.
*
* Hint: you can use the functions `indexWhere` and / or `indexOf` of the
* `Vector` class
*/
def findChar(c: Char, levelVector: Vector[Vector[Char]]): Pos = {
val row = levelVector indexWhere((rowVector) => rowVector.indexOf(c) > -1)
val col = levelVector(row) indexOf c
Pos(row, col)
}
private lazy val vector: Vector[Vector[Char]] =
Vector(level.split("\\n").map(str => Vector(str: _*)): _*)
lazy val terrain: Terrain = terrainFunction(vector)
lazy val startPos: Pos = findChar('S', vector)
lazy val goal: Pos = findChar('T', vector)
}
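/**
 * Minimal sketch (not part of the original assignment): shows how a level string is
 * split into the Vector[Vector[Char]] used by `terrainFunction` and `findChar`, and
 * how `indexWhere`/`indexOf` locate a character. The object name and values below
 * are hypothetical, for illustration only.
 */
private object LevelParsingSketch {
  val level =
    """ST
      |oo
      |oo""".stripMargin

  // Same construction as `vector` in the trait above.
  val parsed: Vector[Vector[Char]] =
    Vector(level.split('\n').map(str => Vector(str: _*)): _*)

  // Row of 'S' is the first row containing it; the column is its index inside that row.
  val sRow: Int = parsed.indexWhere(row => row.indexOf('S') >= 0) // 0
  val sCol: Int = parsed(sRow).indexOf('S')                       // 0
}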
| javierarilos/coursera-fp-design-scala | week2-streams/src/main/scala/streams/StringParserTerrain.scala | Scala | apache-2.0 | 2,630 |
package phenan.prj.exception
case class ClassFileNotFoundException (msg: String) extends Exception(msg)
case class InvalidClassFileException (msg: String) extends Exception(msg)
case class InvalidTypeException (msg: String, cause: Throwable) extends Exception(msg)
object InvalidTypeException {
def apply (msg: String): InvalidTypeException = InvalidTypeException(msg, null)
}
case class ParseException (msg: String) extends Exception(msg)
case class InvalidASTException (msg: String, cause: Throwable) extends Exception(msg, cause)
object InvalidASTException {
def apply (msg: String): InvalidASTException = InvalidASTException(msg, null)
}
case class InitializationFailedException (cause: Throwable) extends Exception(cause)
| csg-tokyo/proteaj2 | src/main/scala/phenan/prj/exception/Exceptions.scala | Scala | mit | 739 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.eval
import monix.execution.exceptions.DummyException
import monix.execution.internal.Platform
import scala.concurrent.duration._
import scala.util.Success
object TaskForkAndForgetSuite extends BaseTestSuite {
test("Task.forkAndForget triggers execution in background thread") { implicit sc =>
var counter = 0
val task = Task.eval { counter += 1; counter }
val main = for {
_ <- task.delayExecution(1.second).forkAndForget
_ <- task.delayExecution(1.second).forkAndForget
} yield ()
val f = main.runToFuture
assertEquals(f.value, Some(Success(())))
assertEquals(counter, 0)
sc.tick(1.second)
assertEquals(counter, 2)
}
test("Task.forkAndForget triggers exceptions in background thread") { implicit sc =>
val dummy = new DummyException()
val task = Task.now(20)
val errorTask = Task.raiseError(dummy)
val result = for {
_ <- errorTask.forkAndForget
value <- task
} yield value
val f = result.runToFuture
sc.tick()
assertEquals(f.value, Some(Success(20)))
assertEquals(sc.state.lastReportedError, dummy)
}
test("Task.forkAndForget is stack safe") { implicit sc =>
val count = if (Platform.isJVM) 100000 else 5000
var task: Task[Any] = Task.evalAsync(1)
for (_ <- 0 until count) task = task.forkAndForget
for (_ <- 0 until count) task = task.flatMap(_ => Task.unit)
val f = task.runToFuture
sc.tick()
assertEquals(f.value, Some(Success(())))
}
} | Wogan/monix | monix-eval/shared/src/test/scala/monix/eval/TaskForkAndForgetSuite.scala | Scala | apache-2.0 | 2,181 |
package org.ensime
import scala.collection.immutable.ListMap
import org.ensime.sbt.util._
package object sbt {
type KeyMap = ListMap[KeywordAtom, SExp]
object KeyMap {
def apply(elems: (KeywordAtom, SExp)*) = ListMap[KeywordAtom, SExp]() ++ elems
}
implicit def tuples2TupleKeyMapOps(
tuples: List[(KeywordAtom, SExp)]): TupleKeyMapOps =
new TupleKeyMapOps(tuples)
class TupleKeyMapOps(tuples: List[(KeywordAtom, SExp)]) {
def toKeyMap: KeyMap = KeyMap() ++ tuples
}
implicit def keyMap2KeyMapOps(keyMap: KeyMap): KeyMapOps =
new KeyMapOps(keyMap)
class KeyMapOps(m1: KeyMap) {
def simpleMerge(m2:KeyMap):KeyMap = {
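      // Descriptive note: the merged map keeps every key from both m1 and m2; when a
      // key is present in both and both values are lists, the lists are concatenated,
      // otherwise the value from m2 wins.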
val keys = m1.keys.toList.diff(m2.keys.toList) ++ m2.keys
val merged: KeyMap = keys.map{ key =>
(m1.get(key), m2.get(key)) match{
case (Some(s1),None) => (key, s1)
case (None,Some(s2)) => (key, s2)
case (Some(SExpList(items1)),
Some(SExpList(items2))) => (key, SExpList(items1 ++ items2))
case (Some(s1:SExp),Some(s2:SExp)) => (key, s2)
case _ => (key, NilAtom())
}
}.toKeyMap
merged
}
}
type SymMap = ListMap[scala.Symbol, Any]
object SymMap {
def apply(elems: (scala.Symbol, Any)*) = ListMap[scala.Symbol, Any]() ++ elems
}
}
| todesking/ScaMS | src/main/scala/org.ensime.sbt/package.scala | Scala | mit | 1,311 |
package homepage.snippet
import scala.xml.NodeSeq
import net.liftweb.http.S._
import net.liftweb.http.SHtml._
import net.liftweb.http.RequestVar
import net.liftweb.util.Helpers._
import net.liftweb.common.Full
import homepage.model.Paper
import net.liftweb.mapper.OrderBy
import net.liftweb.mapper.Descending
/** Renders the papers. */
class Papers {
def showPapers(xhtml: NodeSeq): NodeSeq =
Paper.findAll(OrderBy(Paper.year, Descending)).map { p =>
<div>
<strong>{ p.title.is }</strong> ({ p.year.is })<br/>
<i>{ p.authors.is }</i> at {journalXhtml(p)}<br/><br/>
</div>
}.toSeq
def journalXhtml(p: Paper): NodeSeq = if (p.url.isEmpty() || p.url.is.isEmpty()) <span>{ p.journal.is } </span> else
<a href={p.url.is} alt={ "Go to paper page on publisher's site" } title={ "Go to paper page on publisher's site" }>{ p.journal.is }</a>
}
| bbiletskyy/homepage | src/main/scala/homepage/snippet/Papers.scala | Scala | apache-2.0 | 889 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.{CatalogStatistics, CatalogTableType}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.plans.logical._
/**
* Analyzes the given columns of the given table to generate statistics, which will be used in
* query optimizations.
*/
case class AnalyzeColumnCommand(
tableIdent: TableIdentifier,
columnNames: Seq[String]) extends RunnableCommand {
override def run(sparkSession: SparkSession): Seq[Row] = {
val sessionState = sparkSession.sessionState
val db = tableIdent.database.getOrElse(sessionState.catalog.getCurrentDatabase)
val tableIdentWithDB = TableIdentifier(tableIdent.table, Some(db))
val tableMeta = sessionState.catalog.getTableMetadata(tableIdentWithDB)
if (tableMeta.tableType == CatalogTableType.VIEW) {
throw new AnalysisException("ANALYZE TABLE is not supported on views.")
}
val sizeInBytes = AnalyzeTableCommand.calculateTotalSize(sessionState, tableMeta)
// Compute stats for each column
val (rowCount, newColStats) = computeColumnStats(sparkSession, tableIdentWithDB, columnNames)
// We also update table-level stats in order to keep them consistent with column-level stats.
val statistics = CatalogStatistics(
sizeInBytes = sizeInBytes,
rowCount = Some(rowCount),
// Newly computed column stats should override the existing ones.
colStats = tableMeta.stats.map(_.colStats).getOrElse(Map.empty) ++ newColStats)
sessionState.catalog.alterTable(tableMeta.copy(stats = Some(statistics)))
// Refresh the cached data source table in the catalog.
sessionState.catalog.refreshTable(tableIdentWithDB)
Seq.empty[Row]
}
/**
* Compute stats for the given columns.
* @return (row count, map from column name to ColumnStats)
*/
private def computeColumnStats(
sparkSession: SparkSession,
tableIdent: TableIdentifier,
columnNames: Seq[String]): (Long, Map[String, ColumnStat]) = {
val relation = sparkSession.table(tableIdent).logicalPlan
// Resolve the column names and dedup using AttributeSet
val resolver = sparkSession.sessionState.conf.resolver
val attributesToAnalyze = AttributeSet(columnNames.map { col =>
val exprOption = relation.output.find(attr => resolver(attr.name, col))
exprOption.getOrElse(throw new AnalysisException(s"Column $col does not exist."))
}).toSeq
// Make sure the column types are supported for stats gathering.
attributesToAnalyze.foreach { attr =>
if (!ColumnStat.supportsType(attr.dataType)) {
throw new AnalysisException(
s"Column ${attr.name} in table $tableIdent is of type ${attr.dataType}, " +
"and Spark does not support statistics collection on this column type.")
}
}
// Collect statistics per column.
// The first element in the result will be the overall row count, the following elements
// will be structs containing all column stats.
// The layout of each struct follows the layout of the ColumnStats.
val ndvMaxErr = sparkSession.sessionState.conf.ndvMaxError
val expressions = Count(Literal(1)).toAggregateExpression() +:
attributesToAnalyze.map(ColumnStat.statExprs(_, ndvMaxErr))
val namedExpressions = expressions.map(e => Alias(e, e.toString)())
val statsRow = Dataset.ofRows(sparkSession, Aggregate(Nil, namedExpressions, relation)).head()
val rowCount = statsRow.getLong(0)
val columnStats = attributesToAnalyze.zipWithIndex.map { case (expr, i) =>
(expr.name, ColumnStat.rowToColumnStat(statsRow.getStruct(i + 1)))
}.toMap
(rowCount, columnStats)
}
}
| jianran/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeColumnCommand.scala | Scala | apache-2.0 | 4,687 |
package scaffvis
/**
* Generic reusable computation which remembers the last value.
*
* @param f the function
* @tparam I input
* @tparam K key used to compare equality of inputs (simple case: I = K)
* @tparam O output
* @param extractKey how to extract K from I
* @param eq how to compare two K
*/
case class ReusableComputation[I, K, O](f: I => O,
extractKey: I => K,
eq: (K, K) => Boolean
) extends (I => O) {
type Input = I
type Key = K
type Output = O
private var cacheKey: K = _
private var cacheVal: O = _
override def apply(x: I) = {
val key = extractKey(x)
if ((cacheKey == null) || (!eq(cacheKey, key))) { //update
cacheKey = key
cacheVal = f(x)
}
cacheVal
}
}
object ReusableComputation {
def simple[I, O](f: I => O) = simpleE(f, any_==)
def simpleE[I, O](f: I => O, eq: (I, I) => Boolean) = ReusableComputation[I, I, O](f, identity, eq)
def anyRefEq[T <: AnyRef](a: T, b: T): Boolean = a eq b
def any_==[T <: Any](a: T, b: T): Boolean = a == b
def productMembersEq[P <: Product](p1: P, p2: P): Boolean = {
if (p1.productArity != p2.productArity)
return false
var c: Int = 0
val cmax = p1.productArity
while (c < cmax) {
val p1e = p1.productElement(c)
val p2e = p2.productElement(c)
val equal = if (p1e.isInstanceOf[AnyRef] && p2e.isInstanceOf[AnyRef])
p1e.asInstanceOf[AnyRef] eq p2e.asInstanceOf[AnyRef]
else
p1e equals p2e
if (!equal) return false
c = c + 1
}
true
}
}
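/**
 * Hypothetical usage sketch (not part of the original file): wraps a slow function so
 * that repeated calls with an equal argument reuse the previously computed value.
 * The names and timings below are made up for illustration.
 */
private object ReusableComputationExample {
  val slowSquare: Int => Long = { x => Thread.sleep(100); x.toLong * x }

  // `simple` compares inputs with ==, so a repeated argument hits the cached result.
  val cachedSquare: Int => Long = ReusableComputation.simple(slowSquare)

  val first = cachedSquare(12)  // computes (sleeps)
  val second = cachedSquare(12) // reuses the remembered value
}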
| velkoborsky/scaffvis | shared/src/main/scala/scaffvis/ReusableComputation.scala | Scala | gpl-3.0 | 1,664 |
/*
* Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.openjdk.jmh.samples
import org.openjdk.jmh.annotations._
import org.openjdk.jmh.infra.Blackhole
import java.util.concurrent.TimeUnit
@BenchmarkMode(Array(Mode.AverageTime))
@OutputTimeUnit(TimeUnit.NANOSECONDS)
@State(Scope.Thread)
class JMHSample_09_Blackholes {
/*
   * Should you need to return multiple results, you have to consider two options.
*
* NOTE: If you are only producing a single result, it is more readable to use
* the implicit return, as in org.openjdk.jmh.samples.JMHSample_08_DeadCode.
* Do not make your benchmark code less readable with explicit Blackholes!
*/
val x1 = Math.PI
val x2 = Math.PI * 2
/*
   * Baseline measurement: how much a single Math.log costs.
*/
@Benchmark
def baseline: Double = Math.log(x1)
/*
* While the Math.log(x2) computation is intact, Math.log(x1)
* is redundant and optimized out.
*/
@Benchmark
def measureWrong: Double = {
Math.log(x1)
Math.log(x2)
}
/*
* This demonstrates Option A:
*
* Merge multiple results into one and return it.
   * This is OK when the computation is relatively heavyweight, and merging
* the results does not offset the results much.
*/
@Benchmark
def measureRight_1: Double = Math.log(x1) + Math.log(x2)
/*
* This demonstrates Option B:
*
* Use explicit Blackhole objects, and sink the values there.
* (Background: Blackhole is just another @State object, bundled with JMH).
*/
@Benchmark
def measureRight_2(bh: Blackhole): Unit = {
bh.consume(Math.log(x1))
bh.consume(Math.log(x2))
}
}
| bantonsson/sbt-jmh | src/sbt-test/sbt-jmh/run/src/main/scala/org/openjdk/jmh/samples/JMHSample_09_Blackholes.scala | Scala | apache-2.0 | 2,809 |
package com.codelab27.cards9.services.settings
trait GameSettings {
/**
* Maximum level for card stats.
*/
def CARD_MAX_LEVEL: Int
/**
* Maximum number for cards in hand.
*/
def MAX_HAND_CARDS: Int
}
| Codelab27/cards9-server | app/com/codelab27/cards9/services/settings/GameSettings.scala | Scala | gpl-2.0 | 224 |
package example
object Lists {
/**
* This method computes the sum of all elements in the list xs. There are
   * multiple techniques that can be used for implementing this method, which
   * you will learn during the class.
*
* For this example assignment you can use the following methods in class
* `List`:
*
* - `xs.isEmpty: Boolean` returns `true` if the list `xs` is empty
* - `xs.head: Int` returns the head element of the list `xs`. If the list
* is empty an exception is thrown
   * - `xs.tail: List[Int]` returns the tail of the list `xs`, i.e. the
* list `xs` without its `head` element
*
* ''Hint:'' instead of writing a `for` or `while` loop, think of a recursive
* solution.
*
* @param xs A list of natural numbers
* @return The sum of all elements in `xs`
*/
def sum(xs: List[Int]): Int = {
    if (xs.isEmpty) 0 else xs.head + sum(xs.tail)
}
/**
* This method returns the largest element in a list of integers. If the
* list `xs` is empty it throws a `java.util.NoSuchElementException`.
*
* You can use the same methods of the class `List` as mentioned above.
*
* ''Hint:'' Again, think of a recursive solution instead of using looping
* constructs. You might need to define an auxiliary method.
*
* @param xs A list of natural numbers
* @return The largest element in `xs`
* @throws java.util.NoSuchElementException if `xs` is an empty list
*/
def max(xs: List[Int]): Int = {
    def maxIter(h: Int, t: List[Int]): Int = {
      if (t.isEmpty) h
      else if (h > t.head) maxIter(h, t.tail)
      else maxIter(t.head, t.tail)
    }
    maxIter(xs.head, xs.tail)
}
}
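/**
 * Illustrative sketch (not part of the assignment): how the recursive definitions
 * above unfold on small inputs. The object name is hypothetical.
 */
private object ListsDemo {
  // sum(List(1, 2, 3)) == 1 + (2 + (3 + 0)) == 6
  val six: Int = Lists.sum(List(1, 2, 3))

  // max keeps the larger of the running maximum and the next head: 3, then 7, then 7.
  val seven: Int = Lists.max(List(3, 7, 2))
}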
| foomorrow/coursera-scala | example/src/main/scala/example/Lists.scala | Scala | gpl-2.0 | 1,682 |
package models.vimeo.video.util
import models.vimeo.video.Download
/**
* author: cvandrei
* since: 2016-02-02
*/
object DownloadHelper {
val quality = "hd"
val fileType = "video/mp4"
val width = 1280
val height = 720
val expires = "2016-01-22T15:13:33+00:00"
val link = "https://vimeo.com/api/file/download?clip_id=152690945&id=393716837&profile=113&codec=H264&exp=1453475613&sig=db6c87e0c3e2ea7706c39044beffc9f3fe666552"
val createdTime = "2016-01-22T11:52:48+00:00"
val fps = 25
val size = 120692533L
val md5 = "b2b3412e3d757943f58d661928ff81bc"
def defaultDownload: Download = {
Download(quality, fileType, width, height, expires, link, createdTime, fps, size, md5)
}
def defaultJson: String = {
s"""{
| "quality": "$quality",
| "type": "$fileType",
| "width": $width,
| "height": $height,
| "expires": "$expires",
| "link": "$link",
| "created_time": "$createdTime",
| "fps": $fps,
| "size": $size,
| "md5": "$md5"
|}""".stripMargin
}
}
| indarium/hbbTVPlugin | test/models/vimeo/video/util/DownloadHelper.scala | Scala | agpl-3.0 | 1,109 |
package com.taig.tmpltr.markup
import com.taig.tmpltr._
import play.api.mvc.Content
trait label
extends Tag.Body[label, Content]
{
val tag = "label"
} | Taig/Play-Tmpltr | app/com/taig/tmpltr/markup/label.scala | Scala | mit | 154 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.tools
import java.io.PrintStream
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.Deserializer
import org.hamcrest.CoreMatchers
import org.hamcrest.MatcherAssert._
import org.junit.Test
import org.mockito.Mockito._
class CustomDeserializer extends Deserializer[String] {
override def deserialize(topic: String, data: Array[Byte]): String = {
assertThat("topic must not be null", topic, CoreMatchers.notNullValue)
new String(data)
}
}
class CustomDeserializerTest {
@Test
def checkDeserializerTopicIsNotNull(): Unit = {
val formatter = new DefaultMessageFormatter()
formatter.keyDeserializer = Some(new CustomDeserializer)
formatter.writeTo(new ConsumerRecord("topic_test", 1, 1l, "key".getBytes, "value".getBytes),
mock(classOf[PrintStream]))
formatter.close()
}
}
| KevinLiLu/kafka | core/src/test/scala/kafka/tools/CustomDeserializerTest.scala | Scala | apache-2.0 | 1,694 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.catnap
package cancelables
import cats.Applicative
import cats.effect.CancelToken
import monix.catnap.CancelableF
import monix.catnap.CancelableF.Empty
/** Represents a class of cancelable references that can hold
* an internal reference to another cancelable (and thus has to
* support the assignment operator).
*
* On assignment, if this cancelable is already
 * canceled, then no assignment should happen and the assigned
* reference should be canceled as well.
*/
trait AssignableCancelableF[F[_]] extends CancelableF[F] {
/**
* Sets the underlying reference to the given [[CancelableF]] reference.
*
* Contract:
*
* - the given reference gets canceled in case the assignable was
* already canceled
* - this operation might throw an error if the contract of the
* implementation doesn't allow for calling `set` multiple times
* (e.g. [[SingleAssignCancelableF]])
*/
def set(cancelable: CancelableF[F]): F[Unit]
}
object AssignableCancelableF {
/**
* Represents [[AssignableCancelableF]] instances that are also
* [[BooleanCancelableF]].
*/
trait Bool[F[_]] extends AssignableCancelableF[F] with BooleanCancelableF[F]
/**
* Interface for [[AssignableCancelableF]] types that can be
* assigned multiple times.
*/
trait Multi[F[_]] extends Bool[F]
/**
* Builds an [[AssignableCancelableF]] instance that's already canceled.
*/
def alreadyCanceled[F[_]](implicit F: Applicative[F]): Bool[F] with Empty[F] =
new Bool[F] with Empty[F] {
def set(ref: CancelableF[F]): F[Unit] = ref.cancel
def isCanceled: F[Boolean] = F.pure(true)
def cancel: CancelToken[F] = F.unit
}
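  // Descriptive note: `set` on an already-canceled assignable returns the incoming
  // cancelable's own cancel token, so the assigned reference is canceled instead of
  // being stored, matching the contract documented on AssignableCancelableF.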
/**
* Represents an [[AssignableCancelableF]] with no internal state and that
* doesn't do anything, either on assignment or on cancelation.
*
* It's a no-op.
*/
def dummy[F[_]](implicit F: Applicative[F]): Multi[F] =
new Multi[F] {
def set(ref: CancelableF[F]): F[Unit] = F.unit
def isCanceled: F[Boolean] = F.pure(false)
def cancel: CancelToken[F] = F.unit
}
}
| monixio/monix | monix-catnap/shared/src/main/scala/monix/catnap/cancelables/AssignableCancelableF.scala | Scala | apache-2.0 | 2,810 |
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package dataapi
import spray.json.DefaultJsonProtocol
import ckan.DataspaceJsonProtocol._
import java.lang.String
import spray.json.JsonFormat
import spray.httpx.unmarshalling.{MalformedContent, FromStringDeserializer}
case class DataspaceCreate (name: String, title: Option[String], description: Option[String]){
require(name matches "[a-z0-9_-]+")
}
case class DataspaceCreateWithId (id: String, name: String, title: Option[String], description: Option[String])
case class DataspaceUpdate (title: Option[String], description: Option[String])
case class DataspaceUpdateWithId (id: String, title: Option[String], description: Option[String])
//case class Resource(id: String, name: Option[String], description: Option[String], format: Option[String], package_id: String, upload: String )
case class PackageCreateWithId(name: String, owner_org: String, title: String, `private`: Boolean = true)
case class CkanOrganizationMember(id: String, username: String, role: String) {
require(role == "admin" || role == "editor" || role == "member")
}
case class ShibData(mail: String, eppn: String, cn: String)
case class CkanUser(name: String, email: String, password: String, id: String, fullname: String, openid: String)
case class CkanErrorMsg (message: String, __type: String)
case class CkanResponse[T](help: String, success: Boolean, result: Option[T], error: Option[CkanErrorMsg])
object StateFilter extends Enumeration {
type StateFilter = Value
val ACTIVE, DELETED, ALL = Value
}
object CkanJsonProtocol extends DefaultJsonProtocol {
implicit val dataspaceCreateFormat = jsonFormat3(DataspaceCreate)
implicit val dataspaceCreateWithIdFormat = jsonFormat4(DataspaceCreateWithId)
implicit val dataspaceUpdateFormat = jsonFormat2(DataspaceUpdate)
implicit val dataspaceUpdateWithIdFormat = jsonFormat3(DataspaceUpdateWithId)
implicit val packageCreateWithIdFormat = jsonFormat4(PackageCreateWithId)
implicit val shibDataFormat = jsonFormat3(ShibData)
implicit val ckanUserFormat = jsonFormat6(CkanUser)
implicit val ckanErrorMsgFormat = jsonFormat2(CkanErrorMsg)
implicit def ckanResponseFormat[T: JsonFormat] = lazyFormat(jsonFormat4(CkanResponse.apply[T]))
implicit val ckanOrganizationMember = jsonFormat3(CkanOrganizationMember)
}
object StateFilterProtocol {
implicit val stringToStateFilter = new FromStringDeserializer[StateFilter.StateFilter] {
def apply(value: String) = {
val name = value.toUpperCase
if (name == StateFilter.ACTIVE.toString) { Right(StateFilter.ACTIVE) }
else if (name == StateFilter.DELETED.toString) { Right(StateFilter.DELETED) }
else if (name == StateFilter.ALL.toString) { Right(StateFilter.ALL) }
else Left(MalformedContent(s"Invalid value '$value'. Valid values are 'active', 'deleted', and 'all'"))
}
}
} | ivan-cukic/litef-conductor | src/main/scala/dataapi/CustomFormats.scala | Scala | apache-2.0 | 3,555 |
import _root_.io.gatling.core.scenario.Simulation
import ch.qos.logback.classic.{Level, LoggerContext}
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
/**
* Performance test for the RestaurantOrder entity.
*/
class RestaurantOrderGatlingTest extends Simulation {
val context: LoggerContext = LoggerFactory.getILoggerFactory.asInstanceOf[LoggerContext]
// Log all HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("TRACE"))
// Log failed HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("DEBUG"))
val baseURL = Option(System.getProperty("baseURL")) getOrElse """http://127.0.0.1:8080"""
val httpConf = http
.baseURL(baseURL)
.inferHtmlResources()
.acceptHeader("*/*")
.acceptEncodingHeader("gzip, deflate")
.acceptLanguageHeader("fr,fr-fr;q=0.8,en-us;q=0.5,en;q=0.3")
.connectionHeader("keep-alive")
.userAgentHeader("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:33.0) Gecko/20100101 Firefox/33.0")
val headers_http = Map(
"Accept" -> """application/json"""
)
val headers_http_authenticated = Map(
"Accept" -> """application/json""",
"X-XSRF-TOKEN" -> "${xsrf_token}"
)
val scn = scenario("Test the RestaurantOrder entity")
.exec(http("First unauthenticated request")
.get("/api/account")
.headers(headers_http)
.check(status.is(401))
.check(headerRegex("Set-Cookie", "XSRF-TOKEN=(.*);[\\\\s]").saveAs("xsrf_token"))).exitHereIfFailed
.pause(10)
.exec(http("Authentication")
.post("/api/authentication")
.headers(headers_http_authenticated)
.formParam("j_username", "admin")
.formParam("j_password", "admin")
.formParam("remember-me", "true")
.formParam("submit", "Login")
.check(headerRegex("Set-Cookie", "XSRF-TOKEN=(.*);[\\\\s]").saveAs("xsrf_token"))).exitHereIfFailed
.pause(1)
.exec(http("Authenticated request")
.get("/api/account")
.headers(headers_http_authenticated)
.check(status.is(200)))
.pause(10)
.repeat(2) {
exec(http("Get all restaurantOrders")
.get("/api/restaurant-orders")
.headers(headers_http_authenticated)
.check(status.is(200)))
.pause(10 seconds, 20 seconds)
.exec(http("Create new restaurantOrder")
.post("/api/restaurant-orders")
.headers(headers_http_authenticated)
.body(StringBody("""{"id":null, "rate":"0", "persons":"0", "comment":"SAMPLE_TEXT", "created":"2020-01-01T00:00:00.000Z", "updated":"2020-01-01T00:00:00.000Z", "status":null}""")).asJSON
.check(status.is(201))
.check(headerRegex("Location", "(.*)").saveAs("new_restaurantOrder_url"))).exitHereIfFailed
.pause(10)
.repeat(5) {
exec(http("Get created restaurantOrder")
.get("${new_restaurantOrder_url}")
.headers(headers_http_authenticated))
.pause(10)
}
.exec(http("Delete created restaurantOrder")
.delete("${new_restaurantOrder_url}")
.headers(headers_http_authenticated))
.pause(10)
}
val users = scenario("Users").exec(scn)
setUp(
users.inject(rampUsers(100) over (1 minutes))
).protocols(httpConf)
}
| goxhaj/gastronomee | src/test/gatling/simulations/RestaurantOrderGatlingTest.scala | Scala | apache-2.0 | 3,550 |
/** Copyright 2015 TappingStone, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prediction.controller
import io.prediction.core.BaseAlgorithm
/** A concrete implementation of [[LServing]] returning the first algorithm's
* prediction result directly without any modification.
*
* @group Serving
*/
class LFirstServing[Q, P] extends LServing[Q, P] {
/** Returns the first algorithm's prediction. */
def serve(query: Q, predictions: Seq[P]): P = predictions.head
}
/** A concrete implementation of [[LServing]] returning the first algorithm's
* prediction result directly without any modification.
*
* @group Serving
*/
object LFirstServing {
/** Returns an instance of [[LFirstServing]]. */
def apply[Q, P](a: Class[_ <: BaseAlgorithm[_, _, Q, P]]): Class[LFirstServing[Q, P]] =
classOf[LFirstServing[Q, P]]
}
| adamharish/PredictionIO | core/src/main/scala/io/prediction/controller/LFirstServing.scala | Scala | apache-2.0 | 1,384 |
/*
* Copyright (c) 2016. StulSoft, Yuriy Stul
*/
package com.stulsoft.ysps.ppromise
import com.typesafe.scalalogging.LazyLogging
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{Future, Promise}
/**
* Created by Yuriy Stul on 10/29/2016.
*/
object PromiseExample3 extends App with LazyLogging {
/**
   * Does some work and completes the promise with a failure
*
* @return Future[String] Success(result); Failure(exception)
*/
def process(): Future[String] = {
//Initialize a promise
val promise = Promise[String]()
// Do a work asynchronously
Future {
Thread.sleep(1000)
// Mark work as failed
promise failure new RuntimeException("test error")
}
// Return future
promise.future
}
logger.info("Main started")
val p = process()
p foreach { r => logger.info(r) }
Thread.sleep(1500)
logger.info("Main ended")
}
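/**
 * Illustrative counterpart (not part of the original file): the same pattern, but
 * completing the promise successfully instead of failing it. The object name and the
 * result string are hypothetical.
 */
object PromiseExample3Success extends App with LazyLogging {
  def process(): Future[String] = {
    val promise = Promise[String]()
    Future {
      Thread.sleep(1000)
      // Mark the work as completed successfully
      promise success "work done"
    }
    promise.future
  }

  logger.info("Main started")
  process() foreach { r => logger.info(r) }
  Thread.sleep(1500)
  logger.info("Main ended")
}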
| ysden123/ysps | src/main/scala/com/stulsoft/ysps/ppromise/PromiseExample3.scala | Scala | mit | 913 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kylin.engine.spark
import java.util.Locale
import org.apache.commons.lang.StringUtils
import org.apache.spark.internal.Logging
import org.apache.spark.sql.SparkSession
object SparkSqlOnLivyBatch extends Logging{
def main(args: Array[String]) {
if (args.length != 1) {
log.info("Usage: SparkSqlOnLivyBatch <sqlstring>")
System.exit(1)
}
val sql : String = args(0)
log.info(String.format(Locale.ROOT, "Sql-Info : %s", sql))
val spark = SparkSession.builder().enableHiveSupport().appName("kylin-sql-livy").getOrCreate()
val sqlStrings = sql.split(";")
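    // Each semicolon-separated statement is trimmed, a trailing backslash (line
    // continuation) is chopped off, and every non-empty statement is executed in order.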
for (sqlString <- sqlStrings) {
var item = sqlString.trim()
if (item.length > 0) {
if (StringUtils.endsWith(item, "\\\\")) {
item = StringUtils.chop(item)
}
spark.sql(item)
}
}
}
}
| apache/incubator-kylin | engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkSqlOnLivyBatch.scala | Scala | apache-2.0 | 1,658 |
package com.signalcollect.dcop.test
import com.signalcollect.dcop.vertices.VariableVertex
import com.signalcollect.dcop.vertices.id.MaxSumId
import scala.collection.mutable.ArrayBuffer
object ReferenceTest extends App {
val vv = new VariableVertex(new MaxSumId(0,2), 1)
val array = ArrayBuffer(vv)
println(array)
vv.state = 1230
println(array)
} | gmazlami/dcop-maxsum | src/main/scala/com/signalcollect/dcop/test/ReferenceTest.scala | Scala | apache-2.0 | 360 |
package sp.server
import akka.{NotUsed, Done}
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.stream.{ActorMaterializer, Materializer, SourceShape}
import scala.concurrent.duration._
import scala.concurrent._
import akka.util.Timeout
import scala.reflect.ClassTag
import akka.stream.scaladsl._
import akka.http.scaladsl.server.{Directives, MethodRejection, SchemeRejection}
import akka.stream.scaladsl.Flow
import akka.cluster.pubsub._
import DistributedPubSubMediator._
import akka.http.scaladsl.model.ws.TextMessage.Strict
import scala.util._
import sp.domain._
import sp.domain.Logic._
object APIWebSocket {
sealed trait API
case class PublishMessage(mess: SPMessage, topic: String = "services") extends API
case class FilterHeader(keyValues: Map[String, Set[SPValue]]) extends API
case class FilterBody(keyValues: Map[String, Set[SPValue]]) extends API
  // Removes the filters whose keys are in `keys`. If the set is empty, all filters are removed.
case class ClearFilters(keys: Set[String] = Set()) extends API
object API {
implicit val apiFormat = deriveFormatISA[API]
}
}
import sp.server.{APIWebSocket => api}
/**
* Used by the SP launcher file
* Created by Kristofer on 2014-06-19.
*/
class LaunchGUI(system: ActorSystem) {
implicit val timeout = Timeout(5 seconds)
import scala.concurrent.Future
import akka.pattern.ask
implicit val actorSystem = system
implicit val materializer = ActorMaterializer()
implicit val dispatcher = system.dispatcher
val mediator = DistributedPubSub(system).mediator
val log = org.slf4j.LoggerFactory.getLogger(getClass.getName)
def launch = {
val webFolder: String = system.settings.config getString "sp.webFolder"
val devFolder: String = system.settings.config getString "sp.devFolder"
val buildFolder: String = system.settings.config getString "sp.buildFolder"
val devMode: Boolean = system.settings.config getBoolean "sp.devMode"
val interface = system.settings.config getString "sp.interface"
val port = system.settings.config getInt "sp.port"
    val srcFolder: String = if (devMode) devFolder else buildFolder
def api =
path("socket" / Segment / JavaUUID){ (topic, id) =>
get{
extractUpgradeToWebSocket{ upg =>
val h = new WebsocketHandler(mediator, topic, id)
complete(upg.handleMessagesWithSinkSource(h.fromFrontEnd, h.toFrontEnd))
}
}
} ~
pathPrefix("api") {
get {
pathEndOrSingleSlash {
complete("THE SP API")
}
} ~
postMessage("ask", true) ~
postMessage("publish", false)
}
val route =
api ~
pathEndOrSingleSlash {
getFromFile(srcFolder + "/index.html")
} ~
getFromDirectory(srcFolder) ~
getFromDirectory(webFolder) ~
      getFromFile(srcFolder + "/index.html") ~
      getFromFile(webFolder + "/index.html")
import java.io.InputStream
import java.security.{ SecureRandom, KeyStore }
import javax.net.ssl.{ SSLContext, TrustManagerFactory, KeyManagerFactory }
import akka.actor.ActorSystem
import akka.http.scaladsl.server.{ Route, Directives }
import akka.http.scaladsl.{ ConnectionContext, HttpsConnectionContext, Http }
import akka.stream.ActorMaterializer
import com.typesafe.sslconfig.akka.AkkaSSLConfig
val serverContext: ConnectionContext = {
val password = "abcdef".toCharArray
val context = SSLContext.getInstance("TLS")
val ks = KeyStore.getInstance("PKCS12")
ks.load(getClass.getClassLoader.getResourceAsStream("keys/server.p12"), password)
val keyManagerFactory = KeyManagerFactory.getInstance("SunX509")
keyManagerFactory.init(ks, password)
context.init(keyManagerFactory.getKeyManagers, null, new SecureRandom)
// start up the web server
ConnectionContext.https(context)
}
//Http().setDefaultServerHttpContext(serverContext)
val bindingFuture = Http().bindAndHandle(route, interface, port) //, connectionContext = serverContext)
log.info(s"Server started. System: ${system.name}, Adress: $interface:$port")
bindingFuture
// scala.io.StdIn.readLine("Press ENTER to exit application.\\n") match {
// case x => system.terminate()
// }
}
def postMessage(url: String, shouldAsk: Boolean) = {
path(url / Segments) { cmd =>
if (cmd.isEmpty)
reject(SchemeRejection("no topic"))
log.debug("postMessage event: "+ url +" - "+cmd)
val topic = cmd.head
val service = cmd.tail.headOption.getOrElse("")
post { entity(as[String]){data =>
val (mess, h) = fixMess(data, topic, service)
if (shouldAsk){
val answer = mediator.ask(Publish(topic, mess)).mapTo[String]
completeOrRecoverWith(answer){ extr =>
val re = SPMessage.make(h, APISP.SPError("No service answered the request"))
complete(re.toJson)
}
} else {
mediator ! Publish(topic, mess)
val re = SPMessage.make(h, APISP.SPACK()) // SPAttributes("result" ->"Message sent")
complete(re.toJson)
}
}}
}
}
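  // Re-serialises an incoming SPMessage with the service name from the URL written into its
  // header (when one is given) and returns the json together with the updated header, falling
  // back to the raw message and empty attributes when parsing fails.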
def fixMess(mess: String, topic: String, service: String) = {
val uP = SPMessage.fromJson(mess)
val toSend = uP.map{ m =>
val updH = if (service.nonEmpty) {
m.header + ("service" -> SPValue(service))
} else m.header
(m.copy(header = updH).toJson, updH)
}
    toSend.getOrElse((mess, SPAttributes.empty))
}
}
// test websockets
import akka.actor._
import akka.stream._
import akka.stream.scaladsl._
import akka.http.scaladsl.model.ws.{ Message, TextMessage }
// https://markatta.com/codemonkey/blog/2016/04/18/chat-with-akka-http-websockets/
// https://github.com/jrudolph/akka-http-scala-js-websocket-chat
class WebsocketHandler(mediator: ActorRef, topic: String, clientID: java.util.UUID) {
val log = org.slf4j.LoggerFactory.getLogger(getClass.getName)
case class Filters(h: Map[String, Set[SPValue]], b: Map[String, Set[SPValue]])
var filter = Filters(Map(), Map())
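  // Sink for messages arriving from the web client: each text frame is parsed into an API
  // command and broadcast to two consumers: one publishes messages onto the pub-sub bus,
  // the other updates the local header/body filters.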
lazy val fromFrontEnd: Sink[Message, NotUsed] = Sink.fromGraph(GraphDSL.create() { implicit b: GraphDSL.Builder[NotUsed] =>
import GraphDSL.Implicits._
val toAPI = b.add(fromWebSocketToAPI)
val split = b.add(Broadcast[Try[APIWebSocket.API]](2))
val sendToBus: Sink[Publish, NotUsed] = Sink.actorRef[Publish](mediator, "Killing me")
toAPI ~> split ~> toBePublished ~> sendToBus
split ~> updFilters
SinkShape(toAPI.in)
})
val fromWebSocketToAPI: Flow[Message, Try[APIWebSocket.API], NotUsed] = Flow[Message]
.collect{ case TextMessage.Strict(text) => log.debug(s"Websocket got: $text"); text}
.map{str =>
SPAttributes.fromJson(str).flatMap(_.to[APIWebSocket.API])
}
val toBePublished: Flow[Try[APIWebSocket.API], Publish, NotUsed] = Flow[Try[APIWebSocket.API]]
.collect{case x: Success[APIWebSocket.API] => x.value}
.collect{
case APIWebSocket.PublishMessage(mess, t) =>
Publish(t, mess.toJson)
}
val updFilters: Sink[Try[APIWebSocket.API], Future[Done]] = Sink.foreach[Try[APIWebSocket.API]] { x =>
x.toOption.collect{
case APIWebSocket.FilterBody(f) =>
val updM = f
filter = filter.copy(b = filter.b ++ updM)
case APIWebSocket.FilterHeader(f) =>
val updM = f
filter = filter.copy(h = filter.h ++ updM)
case APIWebSocket.ClearFilters(keys) =>
if (keys.isEmpty)
filter = Filters(Map(), Map())
else {
val toRemove = (kv: (String, Set[SPValue])) => !keys.contains(kv._1)
val newHF = filter.h.filter(toRemove)
val newBF = filter.b.filter(toRemove)
filter = Filters(newHF, newBF)
}
}
}
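  // Source of messages going back to the web client: an actor subscribed to both the topic and
  // the client's own id feeds the stream; messages are filtered against the configured
  // header/body filters and a keep-alive frame is injected every two seconds.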
lazy val toFrontEnd: Source[Message, NotUsed] = Source.fromGraph(GraphDSL.create() { implicit b: GraphDSL.Builder[NotUsed] =>
import GraphDSL.Implicits._
val in = b.add(receiveFromBus)
val out = b.add(injectKeepAlive)
in ~> parseMessagesFromBus ~> convertToWebSockMess ~> out
SourceShape(out.out)
})
val receiveFromBus: Source[Any, Unit] = Source.actorRef[Any](1000, OverflowStrategy.dropNew)
.mapMaterializedValue { ref =>
mediator ! Subscribe(topic, ref)
mediator ! Subscribe(clientID.toString, ref)
}
val parseMessagesFromBus: Flow[Any, String, NotUsed] = Flow[Any]
//.map{x => println(s"ws got before conv: $x"); x}
.collect{case str: String => str}
.map(str => SPMessage.fromJson(str))
.collect{case x: Some[SPMessage] => x.value}
.filter(messFilter)
.map(mess => mess.toJson)
//.map{x => println(s"ws will forward: $x"); x}
def messFilter(mess: SPMessage) = {
(filter.b.isEmpty && filter.h.isEmpty) ||
(filterPickle(mess.header, filter.h) && filterPickle(mess.body, filter.b))
}
def filterPickle(p: SPAttributes, f: Map[String, Set[SPValue]]) = {
Try{
f.forall(x => !p.keys.contains(x._1) || p.keys.contains(x._1) && x._2.contains(p.value(x._1)))
}.getOrElse(true)
}
val convertToWebSockMess: Flow[String, Strict, NotUsed] = Flow[String].map(str => TextMessage(str))
val injectKeepAlive: Flow[Strict, Strict, NotUsed] =
Flow[Strict].keepAlive(2.second, () => TextMessage("keep-alive"))
// lazy val webSocketHandler: Flow[Message, Message, NotUsed] = Flow.fromGraph(GraphDSL.create() { implicit b: GraphDSL.Builder[NotUsed] =>
// import GraphDSL.Implicits._
//
// val in = b.add(transformMessages)
// val out = b.add(sendToWebSocket)
//
// val parseResultBC = b.add(Broadcast[Try[api.APIWebSocket2]](2))
// val messageBC = b.add(Broadcast[MessageAndAck](2))
// val merge = b.add(Merge[APIWebSocket2](3))
//
// val sendReceive = Flow.fromSinkAndSource(sendToBus, receiveFromBus)
//
//
//
// in ~> parseResultBC ~> matchWebSocketMessages ~> messageBC ~> prepareToSend ~> sendReceive ~> parseMessagesFromBus ~> merge
// parseResultBC ~> parsingError ~> merge
// messageBC ~> prepareToSendACK ~> merge
// merge ~> convertAPIToString ~> out
//
// FlowShape(in.in, out.out)
// })
//
//
// val receiveFromBus: Source[Any, Unit] = Source.actorRef[Any](1000, OverflowStrategy.fail)
// .mapMaterializedValue { ref =>
// myRef = Some(ref)
// mediator ! Subscribe(topic, ref)
// }
//
//
// val sendToBus: Sink[Any, NotUsed] = Sink.actorRef[Any](mediator, "Killing me")
// val sendToWebSocket: Flow[String, Strict, NotUsed] = Flow[String].map(str => TextMessage(str))
//
//
// val transformMessages: Flow[Message, Try[APIWebSocket2], NotUsed] = Flow[Message]
// .collect{ case TextMessage.Strict(text) => println(s"Websocket got: $text"); text}
// .map{str => Try{read[APIWebSocket2](str)}}
//
// val matchWebSocketMessages: Flow[Try[APIWebSocket2], MessageAndAck, NotUsed] = Flow[Try[APIWebSocket2]]
// .collect{case x: Success[APIWebSocket2] => x.value}
// .collect{
// case PublishMessage(mess, t) =>
// MessageAndAck(Some(Publish(t, toJson(mess))), APIWebSocket.SPACK(s"Message sent to topic $t"))
// }
//
// val prepareToSend: Flow[MessageAndAck, Any, NotUsed] = Flow[MessageAndAck]
// .collect{case x: MessageAndAck if x.messToBus.isDefined => x.messToBus.get}
//
// val prepareToSendACK: Flow[MessageAndAck, APIWebSocket, NotUsed] = Flow[MessageAndAck]
// .collect{
// case x: MessageAndAck if x.messToBus.isDefined => x.reply
// case x: MessageAndAck if x.messToBus.isEmpty => APIWebSocket.SPError("Something wrong with the akka stream")
// }
//
// val parsingError: Flow[Try[APIWebSocket], APIWebSocket.SPError, NotUsed] = Flow[Try[APIWebSocket]]
// .collect{case x: Failure[APIWebSocket] => APIWebSocket.SPError(x.exception.getMessage)}
//
// val parseMessagesFromBus: Flow[Any, APIWebSocket, NotUsed] = Flow[Any]
// .collect{case str: String => str}
// .map(str => Try{upickle.default.read[UPickleMessage](str)})
// .collect{case x: Success[UPickleMessage] => x.value}
// .filter(messFilter)
// .map(mess => APIWebSocket.PublishMessage(mess, "FROMBUS"))
//
// val convertAPIToString = Flow[APIWebSocket]
// .map(x => toJson(x))
//
// val printFlow = Flow[Any].filter(x => {println(s"WE GOT FROM THE BUS: $x"); true})
}
| kristoferB/SP | spcore/src/main/scala/sp/server/LaunchGUI.scala | Scala | mit | 12,464 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import java.util.Locale
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, OneRowRelation}
import org.apache.spark.sql.test.SharedSQLContext
class QueryExecutionSuite extends SharedSQLContext {
test("toString() exception/error handling") {
val badRule = new SparkStrategy {
var mode: String = ""
override def apply(plan: LogicalPlan): Seq[SparkPlan] =
mode.toLowerCase(Locale.ROOT) match {
case "exception" => throw new AnalysisException(mode)
case "error" => throw new Error(mode)
case _ => Nil
}
}
spark.experimental.extraStrategies = badRule :: Nil
def qe: QueryExecution = new QueryExecution(spark, OneRowRelation)
// Nothing!
badRule.mode = ""
assert(qe.toString.contains("OneRowRelation"))
// Throw an AnalysisException - this should be captured.
badRule.mode = "exception"
assert(qe.toString.contains("org.apache.spark.sql.AnalysisException"))
// Throw an Error - this should not be captured.
badRule.mode = "error"
val error = intercept[Error](qe.toString)
assert(error.getMessage.contains("error"))
}
}
| JerryLead/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/QueryExecutionSuite.scala | Scala | apache-2.0 | 2,037 |
// Copyright (c) 2013, Johns Hopkins University. All rights reserved.
// This software is released under the 2-clause BSD license.
// See /LICENSE.txt
// Travis Wolfe, [email protected], 30 July 2013
package edu.jhu.hlt.parma.types
object SVec {
def apply(i: Int, v: Double): SVec = {
val sv = new SVec(1)
sv.add(i, v)
sv
}
def apply(ivs: (Int, Double)*): SVec = {
val sv = new SVec
for(iv <- ivs)
sv.add(iv._1, iv._2)
sv
}
def duplicate(sv: SVec): SVec = duplicate(sv, sv.items.size)
def duplicate(sv: SVec, newSize: Int): SVec = {
assert(newSize >= sv.items.size)
val nsv = new SVec(newSize)
nsv ++= sv
nsv
}
/**
* turns every index `i` in src into `i*stride + offset`
* and adds it to dest
*
* this is useful for some of the feature reindexing
* (e.g. DomainAdaptation and FeatureRefiner)
*/
def addWithStride(src: SVec, dest: SVec, stride: Int, offset: Int) {
if(offset >= stride || stride < 1 || offset < 0) {
throw new IllegalArgumentException("offset must be an index that is " +
"less than stride: offset=%d stride=%d".format(offset, stride))
}
src.items.foreach(iv => dest.add(iv._1 * stride + offset, iv._2))
}
}
sealed class SVec(initialSize: Int = 32) extends Serializable {
private[this] var indices = Array.ofDim[Int](initialSize)
private[this] var values = Array.ofDim[Double](initialSize)
private[this] var top = 0 // indices less than this are valid
private[this] var cap = initialSize // size of current arrays
/**
* WARNING: best not to mess with the backing unless you know what you're doing
*/
def getIndices: Array[Int] = indices
/**
* WARNING: best not to mess with the backing unless you know what you're doing
*/
def getValues: Array[Double] = values
/**
* WARNING: best not to mess with the backing unless you know what you're doing
*/
def getTop: Int = top
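  // Appends an (index, value) pair: zero values are skipped, the backing arrays grow by
  // roughly 1.5x when full, and duplicate indices are allowed (they are summed later by
  // uniqItems/compact).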
def add(i: Int, v: Double) {
if(i < 0)
throw new RuntimeException("you cannot give me a negative index! idx=%d value=%3g".format(i, v))
if(v == 0d) return
if(top == cap) {
cap = (cap * 1.5 + 2).toInt
indices = java.util.Arrays.copyOf(indices, cap)
values = java.util.Arrays.copyOf(values, cap)
}
indices(top) = i
values(top) = v
top += 1
}
/**
* adds every value in DVec where the indices are interpretted as:
* (offset, offset + 1, ..., offset + dv.dimension - 1)
*/
def add(dv: DVec, offset: Int) {
val n = dv.dimension
var i = 0
while(i < n) {
add(offset + i, dv(i))
i += 1
}
}
def ++=(sv: SVec) {
sv.items.foreach(iv => add(iv._1, iv._2))
}
def clear {
top = 0
}
def rawIndices = indices.toIndexedSeq
def numItems: Int = top
def items: Iterator[(Int, Double)] = (0 until top).map(i => (indices(i), values(i))).iterator
/**
* for debugging
*/
def containsBadValues(checkForNaN: Boolean = true, checkForInf: Boolean = true): Boolean = {
def bad(d: Double) = (checkForNaN && d.isNaN) || (checkForInf && d.isInfinite)
val allGood = items.map(_._2).forall(d => !bad(d))
!allGood
}
def compact {
val (i, v) = uniqItems
assert(i.length == v.length)
indices = i
values = v
    top = i.length
cap = i.length
}
/**
* this doesn't seem to work properly with the rest of
* mallet's pipeline/instancelist/training incantation,
* and it has to do with needing the keys entered into a
* common alphabet...
* TODO find a way to do this in mallet without an alphabet
*/
def toMallet = {
compact
new cc.mallet.types.SparseVector(indices, values)
}
  // I could just define apply for SVec, but that seems risky...
private sealed class IntDoubleAccum(size: Int) extends cern.colt.function.IntDoubleProcedure {
private[this] var ints = Array.ofDim[Int](size)
private[this] var doubles = Array.ofDim[Double](size)
private[this] var i = 0
override def apply(first: Int, second: Double): Boolean = {
ints(i) = first
doubles(i) = second
i += 1
true
}
def get = (ints, doubles)
}
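  // Sums the values of duplicate indices via a Colt OpenIntDoubleHashMap and returns parallel
  // arrays of the unique indices and their accumulated values.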
def uniqItems: (Array[Int], Array[Double]) = {
val u = new cern.colt.map.OpenIntDoubleHashMap(top/2)
var i = 0
while(i < top) {
val idx = indices(i)
val old = u.get(idx)
u.put(idx, old + values(i))
i += 1
}
val accum = new IntDoubleAccum(u.size)
u.forEachPair(accum)
accum.get
}
def l2: Double = {
compact
var ss = 0d
var i = 0
while(i < top) {
val v = values(i)
ss += v * v
i += 1
}
math.sqrt(ss)
}
def l1: Double = {
compact
var s = 0d
var i = 0
while(i < top) {
val v = values(i)
if(v >= 0d) s += v
else s -= v
i += 1
}
s
}
def scale(factor: Double) {
var i = 0
while(i < top) {
values(i) *= factor
i += 1
}
}
}
| hltcoe/parma | src/main/scala/edu/jhu/hlt/parma/types/SVec.scala | Scala | bsd-2-clause | 4,696 |
/*
* Copyright (c) 2013-2016 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
import sbt._
object Dependencies {
val resolutionRepos = Seq(
// For Snowplow
"Snowplow Analytics Maven releases repo" at "http://maven.snplow.com/releases/",
"Snowplow Analytics Maven snapshot repo" at "http://maven.snplow.com/snapshots/",
// For Scalazon
"BintrayJCenter" at "http://jcenter.bintray.com",
// For uaParser utils
"user-agent-parser repo" at "https://clojars.org/repo/",
// For user-agent-utils
"user-agent-utils repo" at "https://raw.github.com/HaraldWalker/user-agent-utils/mvn-repo/"
)
object V {
// Java
val logging = "1.1.3"
val httpCore = "4.3"
val httpClient = "4.3.1"
val jacksonCore = "2.3.0"
val slf4j = "1.7.5"
val awsSdk = "1.6.11"
val kinesisClient = "1.6.1"
// Scala
val argot = "1.0.1"
val config = "1.0.2"
val scalaUtil = "0.1.0"
val snowplowRawEvent = "0.1.0"
val snowplowCommonEnrich = "0.23.1"
val scalazon = "0.11"
val scalaz7 = "7.0.0"
val igluClient = "0.4.0"
val snowplowTracker = "0.2.0"
// Scala (test only)
// val specs2 = "2.3.6" Conflicts with com.chuusai:shapeless
val specs2 = "2.2.3"
val scalazSpecs2 = "0.1.2"
// Scala (compile only)
val commonsLang3 = "3.1"
val thrift = "0.9.0"
}
object Libraries {
// Java
val logging = "commons-logging" % "commons-logging" % V.logging
val httpCore = "org.apache.httpcomponents" % "httpcore" % V.httpCore
val httpClient = "org.apache.httpcomponents" % "httpclient" % V.httpClient
val jacksonCore = "com.fasterxml.jackson.core" % "jackson-core" % V.jacksonCore
val slf4j = "org.slf4j" % "slf4j-simple" % V.slf4j
val log4jOverSlf4j = "org.slf4j" % "log4j-over-slf4j" % V.slf4j
val awsSdk = "com.amazonaws" % "aws-java-sdk" % V.awsSdk
val kinesisClient = "com.amazonaws" % "amazon-kinesis-client" % V.kinesisClient
// Scala
val argot = "org.clapper" %% "argot" % V.argot
val config = "com.typesafe" % "config" % V.config
val scalaUtil = "com.snowplowanalytics" % "scala-util" % V.scalaUtil
val snowplowRawEvent = "com.snowplowanalytics" % "snowplow-thrift-raw-event" % V.snowplowRawEvent
val snowplowCommonEnrich = "com.snowplowanalytics" % "snowplow-common-enrich" % V.snowplowCommonEnrich
val scalazon = "io.github.cloudify" %% "scalazon" % V.scalazon
val scalaz7 = "org.scalaz" %% "scalaz-core" % V.scalaz7
val igluClient = "com.snowplowanalytics" %% "iglu-scala-client" % V.igluClient
val snowplowTracker = "com.snowplowanalytics" %% "snowplow-scala-tracker" % V.snowplowTracker
// Scala (test only)
val specs2 = "org.specs2" %% "specs2" % V.specs2 % "test"
val scalazSpecs2 = "org.typelevel" %% "scalaz-specs2" % V.scalazSpecs2 % "test"
// Scala (compile only)
val commonsLang3 = "org.apache.commons" % "commons-lang3" % V.commonsLang3 % "compile"
val thrift = "org.apache.thrift" % "libthrift" % V.thrift % "compile"
}
}
| bigdecisions/snowplow | 3-enrich/stream-enrich/project/Dependencies.scala | Scala | apache-2.0 | 4,620 |
/*
* Copyright 2015 the original author or authors.
* @https://github.com/scouter-project/scouter
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package scouter.server.core
;
import scouter.lang.counters.CounterConstants
import scouter.lang.value.DecimalValue
import scouter.lang.{CounterKey, TimeTypeEnum}
import scouter.lang.pack.PerfCounterPack
import scouter.server.Logger
import scouter.server.core.cache.CounterCache
import scouter.server.db.{DailyCounterWR, RealtimeCounterWR}
import scouter.server.plugin.PlugInManager
import scouter.server.plugin.alert.AlertEngine
import scouter.server.util.{EnumerScala, ThreadScala}
import scouter.util.{CastUtil, DateUtil, HashUtil, RequestQueue}
/**
* request queue of performance counter data and also dispatcher of the queue
*/
object PerfCountCore {
var queue = new RequestQueue[PerfCounterPack](CoreRun.MAX_QUE_SIZE);
ThreadScala.startDaemon("scouter.server.core.PerfCountCore", {CoreRun.running}) {
val counterPack = queue.get();
val objHash = HashUtil.hash(counterPack.objName);
PlugInManager.counter(counterPack);
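        // realtime counters are stored as-is and fed to the caches/alert engine, while all
        // other time types are rolled into the daily counter DB keyed by date and hh:mm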
if (counterPack.timetype == TimeTypeEnum.REALTIME) {
//counterPack.data.put(CounterConstants.COMMON_OBJHASH, new DecimalValue(objHash)) //add objHash into datafile
//counterPack.data.put(CounterConstants.COMMON_TIME, new DecimalValue(counterPack.time)) //add objHash into datafile
RealtimeCounterWR.add(counterPack);
EnumerScala.foreach(counterPack.data.keySet().iterator(), (k: String) => {
val value = counterPack.data.get(k);
val counterKey = new CounterKey(objHash, k, counterPack.timetype);
Auto5MSampling.add(counterKey, value);
CounterCache.put(counterKey, value);
AlertEngine.putRealTime(counterKey, value); //experimental
})
} else {
val yyyymmdd = CastUtil.cint(DateUtil.yyyymmdd(counterPack.time));
val hhmm = CastUtil.cint(DateUtil.hhmm(counterPack.time));
EnumerScala.foreach(counterPack.data.keySet().iterator(), (k: String) => {
val value = counterPack.data.get(k);
val counterKey = new CounterKey(objHash, k, counterPack.timetype);
DailyCounterWR.add(yyyymmdd, counterKey, hhmm, value);
//CounterCache.put(counterKey, value);
})
}
}
def add(p: PerfCounterPack) {
val ok = queue.put(p);
if (!ok) {
Logger.println("S109", 10, "queue exceeded!!");
}
}
}
| yuyupapa/OpenSource | scouter.server/src/scouter/server/core/PerfCountCore.scala | Scala | apache-2.0 | 3,135 |
package org.moe.interpreter
import org.scalatest.FunSuite
import org.scalatest.matchers.ShouldMatchers
import org.moe.runtime._
import org.moe.ast._
import ClassMatchers._
class ClassNodeTestSuite
extends FunSuite
with InterpreterTestUtils
with ShouldMatchers
with ClassMatchers {
test("... basic test with class") {
// class Point { }
val ast = wrapSimpleAST(
List(
ClassDeclarationNode(
"Point",
None,
StatementsNode(
List()
)
)
)
)
interpreter.eval(runtime, runtime.getRootEnv, ast)
val root_pkg = runtime.getRootPackage
root_pkg should haveClass("Point")
}
test("... basic test with constructor") {
// class Point { } Point->new
val ast = wrapSimpleAST(
List(
ClassDeclarationNode(
"Point",
None,
StatementsNode(
List()
)
),
MethodCallNode(
ClassAccessNode("Point"),
"new",
List()
)
)
)
val result = interpreter.eval(runtime, runtime.getRootEnv, ast)
result.getAssociatedClass match {
case Some(klass) => klass.getName should equal ("Point")
case None => assert(false)
}
}
test("... basic test with class and superclass") {
// class Point { }
// class Point3D extends Point { }
val ast = wrapSimpleAST(
List(
ClassDeclarationNode(
"Point",
None,
StatementsNode(
List()
)
),
ClassDeclarationNode(
"Point3D",
Some("Point"),
StatementsNode(
List()
)
)
)
)
interpreter.eval(runtime, runtime.getRootEnv, ast)
val root_pkg = runtime.getRootPackage
root_pkg should haveClass("Point")
root_pkg should haveClass("Point3D")
val point3d_class = root_pkg.getClass("Point3D")
point3d_class match {
case Some(point3d_class) => point3d_class should extendClass("Point")
case None => assert(false)
}
}
test("... basic test with class and attributes") {
// class Point { has $!x = 0; has $!y = 0 }
val ast = wrapSimpleAST(
List(
ClassDeclarationNode(
"Point",
None,
StatementsNode(
List(
AttributeDeclarationNode(
"$!x",
IntLiteralNode(0)
),
AttributeDeclarationNode(
"$!y",
IntLiteralNode(0)
)
)
)
)
)
)
interpreter.eval(runtime, runtime.getRootEnv, ast)
val point_class = runtime.getRootPackage.getClass("Point").getOrElse(
throw new Exception("Class expected") // This has been tested
)
point_class should haveAttribute("$!x")
point_class.getAttribute("$!x") match {
case Some(attr) =>
attr.getDefault match {
case Some(attr) => attr.unboxToInt.get should equal (0)
case None => assert(false)
}
case None => assert(false)
}
}
test("... basic test with two-arg constructor") {
// class Point { } Point->new
val ast = wrapSimpleAST(
List(
ClassDeclarationNode(
"Point",
None,
StatementsNode(
List(
AttributeDeclarationNode("$!x", IntLiteralNode(0)),
AttributeDeclarationNode("$!y", IntLiteralNode(0)),
MethodDeclarationNode(
"coords",
SignatureNode(List()),
StatementsNode(
List(
ArrayLiteralNode(
List(
AttributeAccessNode("$!x"),
AttributeAccessNode("$!y")
)
)
)
)
)
)
)
),
MethodCallNode(
MethodCallNode(
ClassAccessNode("Point"),
"new",
List(
PairLiteralNode(StringLiteralNode("x"), IntLiteralNode(150)),
PairLiteralNode(StringLiteralNode("y"), IntLiteralNode(250))
)
),
"coords",
List()
)
)
)
val result = interpreter.eval(runtime, runtime.getRootEnv, ast)
val coords = result.unboxToArrayBuffer.get
coords(0).unboxToInt.get should equal (150)
coords(1).unboxToInt.get should equal (250)
}
test("... basic test with class and methods") {
// class Counter { has $!n; method inc { ++$!n } }
val ast = wrapSimpleAST(
List(
ClassDeclarationNode(
"Counter",
None,
StatementsNode(
List(
AttributeDeclarationNode(
"$!n",
IntLiteralNode(0)
),
MethodDeclarationNode(
"inc",
SignatureNode(List()), // FIXME test with params when we have more operators :P
StatementsNode(
List(
PostfixUnaryOpNode(AttributeAccessNode("$!n"), "++")
)
)
)
)
)
)
)
)
interpreter.eval(runtime, runtime.getRootEnv, ast)
val counter_class = runtime.getRootPackage.getClass("Counter").getOrElse(
throw new Exception("Class expected") // This has been tested
)
counter_class should haveMethod("inc")
}
test("... basic test with method call") {
// class Foo { method zzz { 42 } } Foo->new->zzz()
val ast = wrapSimpleAST(
List(
ClassDeclarationNode(
"Foo",
None,
StatementsNode(
List(
MethodDeclarationNode(
"zzz",
SignatureNode(List()),
StatementsNode(
List(
IntLiteralNode(42)
)
)
)
)
)
),
MethodCallNode(
MethodCallNode(
ClassAccessNode("Foo"),
"new",
List()
),
"zzz",
List()
)
)
)
val result = interpreter.eval(runtime, runtime.getRootEnv, ast)
result.unboxToInt.get should equal (42)
}
}
| MoeOrganization/moe | src/test/scala/org/moe/interpreter/ClassNodeTestSuite.scala | Scala | mit | 6,399 |
/*
* Copyright 2016 agido GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.pageobject.examples.wikipedia
case class WikipediaEnPage(entry: String)
extends WikipediaPage("en", "Wikipedia")(entry)
| agido/pageobject | examples/src/test/scala/org/pageobject/examples/wikipedia/WikipediaEnPage.scala | Scala | apache-2.0 | 729 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.stats
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import scala.collection.immutable.ListMap
import scala.reflect.ClassTag
/**
* An enumeration is merely a HashMap mapping values to number of occurrences
*
* @param sft simple feature type
* @param property property name the enumeration is being made for
* @tparam T some type T (which is restricted by the stat parser upstream of Histogram instantiation)
*/
class EnumerationStat[T] private [stats] (val sft: SimpleFeatureType,
val property: String)
(implicit ct: ClassTag[T]) extends Stat {
override type S = EnumerationStat[T]
@deprecated("property")
lazy val attribute: Int = i
private val i = sft.indexOf(property)
private [stats] val enumeration = scala.collection.mutable.HashMap.empty[T, Long].withDefaultValue(0)
def size: Int = enumeration.size
def values: Iterable[T] = enumeration.keys
def frequency(value: T): Long = enumeration(value)
def frequencies: Iterable[(T, Long)] = enumeration
override def observe(sf: SimpleFeature): Unit = {
val value = sf.getAttribute(i).asInstanceOf[T]
if (value != null) {
enumeration(value) += 1
}
}
override def unobserve(sf: SimpleFeature): Unit = {
val value = sf.getAttribute(i).asInstanceOf[T]
if (value != null) {
val current = enumeration(value)
if (current == 1) {
enumeration.remove(value)
} else {
enumeration(value) = current - 1
}
}
}
override def +(other: EnumerationStat[T]): EnumerationStat[T] = {
val plus = new EnumerationStat[T](sft, property)
plus += this
plus += other
plus
}
override def +=(other: EnumerationStat[T]): Unit =
other.enumeration.foreach { case (key, count) => enumeration(key) += count }
override def toJsonObject: Map[T, Long] =
if (enumeration.isEmpty) { Map.empty } else { ListMap(enumeration.toSeq.sortBy(_.toString):_*) }
override def isEmpty: Boolean = enumeration.isEmpty
override def clear(): Unit = enumeration.clear()
override def isEquivalent(other: Stat): Boolean = other match {
case that: EnumerationStat[_] => property == that.property && enumeration == that.enumeration
case _ => false
}
}
| ddseapy/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/stats/EnumerationStat.scala | Scala | apache-2.0 | 2,820 |
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.scalastyle
import java.io.File
import org.junit.Assert.assertEquals
import org.junit.Test
import org.scalatest.junit.AssertionsForJUnit
class SourceFileParserTest extends AssertionsForJUnit {
@Test
def parseEmptyFile(): Unit = {
val configPath = "src/test/resources/config/scalastyle_config.xml"
val config = ScalastyleConfiguration.readFromXml(configPath)
val checks = config.checks.filter(_.enabled)
val sourcePath = new File("src/test/resources/testfiles/EmptyClass.scala")
val sourceFile = new DirectoryFileSpec(sourcePath.getAbsolutePath(), encoding = None, sourcePath.getAbsoluteFile())
val msgs = new CheckerUtils().verifyFile(config, checks, sourceFile)
assertEquals(Nil, msgs)
}
}
| scalastyle/scalastyle | src/test/scala/org/scalastyle/SourceFileParserTest.scala | Scala | apache-2.0 | 1,470 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package base
package types
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElementImpl
import com.intellij.lang.ASTNode
import api.base.types._
import com.intellij.psi.{PsiElement, ResolveState}
import com.intellij.psi.scope.PsiScopeProcessor
import api.statements.ScDeclaredElementsHolder
/**
* @author Alexander Podkhalyuzin
* Date: 07.03.2008
*/
class ScRefinementImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScRefinement{
override def toString: String = "Refinement"
override def processDeclarations(processor: PsiScopeProcessor, state: ResolveState, lastParent: PsiElement, place: PsiElement): Boolean = {
val iterator = types.iterator
while (iterator.hasNext) {
val elem = iterator.next
if (!processor.execute(elem, state)) return false
}
val iterator1 = holders.iterator.flatMap(_.declaredElements.iterator)
while (iterator1.hasNext) {
val elem = iterator1.next
if (!processor.execute(elem, state)) return false
}
true
}
} | consulo/consulo-scala | src/org/jetbrains/plugins/scala/lang/psi/impl/base/types/ScRefinementImpl.scala | Scala | apache-2.0 | 1,085 |
package scalajsreact.template.components
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.html_<^._
object Footer {
val component = ScalaComponent.builder
.static("Footer")(
<.footer(
^.textAlign.center,
<.div(^.borderBottom := "1px solid grey", ^.padding := "0px"),
<.p(^.paddingTop := "5px", "Footer")
)
)
.build
def apply() = component()
}
| chandu0101/scalajs-react-template | src/main/scala/scalajsreact/template/components/Footer.scala | Scala | apache-2.0 | 417 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.catalog.BucketSpec
import org.apache.spark.sql.catalyst.expressions
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.planning.ScanOperation
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.{FileSourceScanExec, SparkPlan}
import org.apache.spark.util.collection.BitSet
/**
* A strategy for planning scans over collections of files that might be partitioned or bucketed
* by user specified columns.
*
* At a high level planning occurs in several phases:
* - Split filters by when they need to be evaluated.
* - Prune the schema of the data requested based on any projections present. Today this pruning
* is only done on top level columns, but formats should support pruning of nested columns as
* well.
* - Construct a reader function by passing filters and the schema into the FileFormat.
* - Using a partition pruning predicates, enumerate the list of files that should be read.
* - Split the files into tasks and construct a FileScanRDD.
* - Add any projection or filters that must be evaluated after the scan.
*
* Files are assigned into tasks using the following algorithm:
* - If the table is bucketed, group files by bucket id into the correct number of partitions.
* - If the table is not bucketed or bucketing is turned off:
* - If any file is larger than the threshold, split it into pieces based on that threshold
* - Sort the files by decreasing file size.
* - Assign the ordered files to buckets using the following algorithm. If the current partition
* is under the threshold with the addition of the next file, add it. If not, open a new bucket
* and add it. Proceed to the next file.
*/
object FileSourceStrategy extends Strategy with Logging {
// should prune buckets iff num buckets is greater than 1 and there is only one bucket column
private def shouldPruneBuckets(bucketSpec: Option[BucketSpec]): Boolean = {
bucketSpec match {
case Some(spec) => spec.bucketColumnNames.length == 1 && spec.numBuckets > 1
case None => false
}
}
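  // Maps a pushed-down filter expression to the set of bucket ids that may contain matching
  // rows for the single bucket column; any predicate that cannot be analysed conservatively
  // selects every bucket.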
private def getExpressionBuckets(
expr: Expression,
bucketColumnName: String,
numBuckets: Int): BitSet = {
def getBucketNumber(attr: Attribute, v: Any): Int = {
BucketingUtils.getBucketIdFromValue(attr, numBuckets, v)
}
def getBucketSetFromIterable(attr: Attribute, iter: Iterable[Any]): BitSet = {
val matchedBuckets = new BitSet(numBuckets)
iter
.map(v => getBucketNumber(attr, v))
.foreach(bucketNum => matchedBuckets.set(bucketNum))
matchedBuckets
}
def getBucketSetFromValue(attr: Attribute, v: Any): BitSet = {
val matchedBuckets = new BitSet(numBuckets)
matchedBuckets.set(getBucketNumber(attr, v))
matchedBuckets
}
expr match {
case expressions.Equality(a: Attribute, Literal(v, _)) if a.name == bucketColumnName =>
getBucketSetFromValue(a, v)
case expressions.In(a: Attribute, list)
if list.forall(_.isInstanceOf[Literal]) && a.name == bucketColumnName =>
getBucketSetFromIterable(a, list.map(e => e.eval(EmptyRow)))
case expressions.InSet(a: Attribute, hset)
if hset.forall(_.isInstanceOf[Literal]) && a.name == bucketColumnName =>
getBucketSetFromIterable(a, hset.map(e => expressions.Literal(e).eval(EmptyRow)))
case expressions.IsNull(a: Attribute) if a.name == bucketColumnName =>
getBucketSetFromValue(a, null)
case expressions.And(left, right) =>
getExpressionBuckets(left, bucketColumnName, numBuckets) &
getExpressionBuckets(right, bucketColumnName, numBuckets)
case expressions.Or(left, right) =>
getExpressionBuckets(left, bucketColumnName, numBuckets) |
getExpressionBuckets(right, bucketColumnName, numBuckets)
case _ =>
val matchedBuckets = new BitSet(numBuckets)
matchedBuckets.setUntil(numBuckets)
matchedBuckets
}
}
private def genBucketSet(
normalizedFilters: Seq[Expression],
bucketSpec: BucketSpec): Option[BitSet] = {
if (normalizedFilters.isEmpty) {
return None
}
val bucketColumnName = bucketSpec.bucketColumnNames.head
val numBuckets = bucketSpec.numBuckets
val normalizedFiltersAndExpr = normalizedFilters
.reduce(expressions.And)
val matchedBuckets = getExpressionBuckets(normalizedFiltersAndExpr, bucketColumnName,
numBuckets)
val numBucketsSelected = matchedBuckets.cardinality()
logInfo {
s"Pruned ${numBuckets - numBucketsSelected} out of $numBuckets buckets."
}
// None means all the buckets need to be scanned
if (numBucketsSelected == numBuckets) {
None
} else {
Some(matchedBuckets)
}
}
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case ScanOperation(projects, filters,
l @ LogicalRelation(fsRelation: HadoopFsRelation, _, table, _)) =>
// Filters on this relation fall into four categories based on where we can use them to avoid
// reading unneeded data:
// - partition keys only - used to prune directories to read
// - bucket keys only - optionally used to prune files to read
// - keys stored in the data only - optionally used to skip groups of data in files
// - filters that need to be evaluated again after the scan
val filterSet = ExpressionSet(filters)
val normalizedFilters = DataSourceStrategy.normalizeExprs(filters, l.output)
val partitionColumns =
l.resolve(
fsRelation.partitionSchema, fsRelation.sparkSession.sessionState.analyzer.resolver)
val partitionSet = AttributeSet(partitionColumns)
val partitionKeyFilters =
ExpressionSet(normalizedFilters
.filter(_.references.subsetOf(partitionSet)))
logInfo(s"Pruning directories with: ${partitionKeyFilters.mkString(",")}")
// subquery expressions are filtered out because they can't be used to prune buckets or pushed
// down as data filters, yet they would be executed
val normalizedFiltersWithoutSubqueries =
normalizedFilters.filterNot(SubqueryExpression.hasSubquery)
val bucketSpec: Option[BucketSpec] = fsRelation.bucketSpec
val bucketSet = if (shouldPruneBuckets(bucketSpec)) {
genBucketSet(normalizedFiltersWithoutSubqueries, bucketSpec.get)
} else {
None
}
val dataColumns =
l.resolve(fsRelation.dataSchema, fsRelation.sparkSession.sessionState.analyzer.resolver)
// Partition keys are not available in the statistics of the files.
val dataFilters =
normalizedFiltersWithoutSubqueries.filter(_.references.intersect(partitionSet).isEmpty)
logInfo(s"Pushed Filters: " +
s"${dataFilters.flatMap(DataSourceStrategy.translateFilter).mkString(",")}")
// Predicates with both partition keys and attributes need to be evaluated after the scan.
val afterScanFilters = filterSet -- partitionKeyFilters.filter(_.references.nonEmpty)
logInfo(s"Post-Scan Filters: ${afterScanFilters.mkString(",")}")
val filterAttributes = AttributeSet(afterScanFilters)
val requiredExpressions: Seq[NamedExpression] = filterAttributes.toSeq ++ projects
val requiredAttributes = AttributeSet(requiredExpressions)
val readDataColumns =
dataColumns
.filter(requiredAttributes.contains)
.filterNot(partitionColumns.contains)
val outputSchema = readDataColumns.toStructType
logInfo(s"Output Data Schema: ${outputSchema.simpleString(5)}")
val outputAttributes = readDataColumns ++ partitionColumns
val scan =
FileSourceScanExec(
fsRelation,
outputAttributes,
outputSchema,
partitionKeyFilters.toSeq,
bucketSet,
dataFilters,
table.map(_.identifier))
val afterScanFilter = afterScanFilters.toSeq.reduceOption(expressions.And)
val withFilter = afterScanFilter.map(execution.FilterExec(_, scan)).getOrElse(scan)
val withProjections = if (projects == withFilter.output) {
withFilter
} else {
execution.ProjectExec(projects, withFilter)
}
withProjections :: Nil
case _ => Nil
}
}
| jkbradley/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala | Scala | apache-2.0 | 9,312 |
/* Title: Pure/System/process_result.scala
Author: Makarius
Result of system process.
*/
package isabelle
final case class Process_Result(
rc: Int,
out_lines: List[String] = Nil,
err_lines: List[String] = Nil,
timeout: Boolean = false,
timing: Timing = Timing.zero)
{
def out: String = cat_lines(out_lines)
def err: String = cat_lines(err_lines)
def errors(errs: List[String]): Process_Result = copy(err_lines = err_lines ::: errs)
def error(err: String): Process_Result = errors(List(err))
def was_timeout: Process_Result = copy(rc = 1, timeout = true)
def ok: Boolean = rc == 0
def interrupted: Boolean = rc == Exn.Interrupt.return_code
def check_rc(pred: Int => Boolean): Process_Result =
if (pred(rc)) this
else if (interrupted) throw Exn.Interrupt()
else Exn.error(err)
def check: Process_Result = check_rc(_ == 0)
def print: Process_Result =
{
Output.warning(err)
Output.writeln(out)
copy(out_lines = Nil, err_lines = Nil)
}
def print_stdout: Process_Result =
{
Output.warning(err, stdout = true)
Output.writeln(out, stdout = true)
copy(out_lines = Nil, err_lines = Nil)
}
def print_if(b: Boolean): Process_Result = if (b) print else this
def print_stdout_if(b: Boolean): Process_Result = if (b) print_stdout else this
}
| larsrh/libisabelle | modules/pide/2018/src/main/scala/System/process_result.scala | Scala | apache-2.0 | 1,331 |
/*
* Copyright 2015 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala.internal
import com.mongodb.MongoException
import org.mongodb.scala.{ Observable, Observer, Subscription }
object TestObservable {
def apply[A](from: Iterable[A]): TestObservable[A] = {
new TestObservable(IterableObservable[A](from))
}
def apply[A](from: Iterable[A], failOn: Int): TestObservable[A] = {
new TestObservable(IterableObservable[A](from), failOn)
}
def apply[A](from: Iterable[A], failOn: Int, errorMessage: String): TestObservable[A] = {
new TestObservable(IterableObservable[A](from), failOn, errorMessage)
}
}
case class TestObservable[A](
delegate: Observable[A] = IterableObservable[Int]((1 to 100).toStream),
failOn: Int = Int.MaxValue,
errorMessage: String = "Failed"
) extends Observable[A] {
var failed = false
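  // Wraps the delegate's observer so that emitting the element equal to `failOn` raises a
  // MongoException and suppresses all subsequent onNext/onComplete signals.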
override def subscribe(observer: Observer[_ >: A]): Unit = {
delegate.subscribe(
new Observer[A] {
var subscription: Option[Subscription] = None
override def onError(throwable: Throwable): Unit = observer.onError(throwable)
override def onSubscribe(sub: Subscription): Unit = {
subscription = Some(sub)
observer.onSubscribe(sub)
}
override def onComplete(): Unit = if (!failed) observer.onComplete()
override def onNext(tResult: A): Unit = {
if (!failed) {
if (tResult == failOn) {
failed = true
onError(new MongoException(errorMessage))
} else {
observer.onNext(tResult)
}
}
}
}
)
}
}
| anand-singh/mongo-scala-driver | driver/src/test/scala/org/mongodb/scala/internal/TestObservable.scala | Scala | apache-2.0 | 2,215 |
object Main {
def main(args: Array[String]) = {
println("Hi from aggregator!")
Main1.main(Array(""))
Main2.main(Array(""))
Main3.main(Array(""))
Main4.main(Array(""))
Main5.main(Array(""))
Main6.main(Array(""))
Main7.main(Array(""))
Main8.main(Array(""))
Main9.main(Array(""))
Main10.main(Array(""))
Main11.main(Array(""))
Main12.main(Array(""))
Main13.main(Array(""))
Main14.main(Array(""))
Main15.main(Array(""))
}
}
| darkocerdic/sbt-multiproject-resolving | aggregator/src/main/scala/Main.scala | Scala | apache-2.0 | 487 |
import scala.compiletime.erasedValue
import scala.deriving.Mirror
object Test extends App {
inline def checkElems[A, T](using inline A: Mirror.SumOf[A]): Unit =
inline erasedValue[A.MirroredElemTypes] match {
case _: T => ()
}
sealed trait Base1 // Base1 MUST NOT have a companion here!
case class Foo() extends Base1
case object Bar extends Base1
case class Qux(i: Int) extends Base1
checkElems[Base1, (Foo, Bar.type, Qux)]
enum Tree[+T] {
case Empty
case Branch[T](left: Tree[T], right: Tree[T]) extends Tree[T]
case Leaf[T](elem: T) extends Tree[T]
}
checkElems[Tree[String], (Tree.Empty.type, Tree.Branch[String], Tree.Leaf[String])]
}
| dotty-staging/dotty | tests/run/deriving-constructor-order.scala | Scala | apache-2.0 | 689 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.hbase
import java.io.{IOException, ObjectInputStream, ObjectOutputStream, ByteArrayInputStream}
import org.apache.avro.Schema
import org.apache.avro.generic.{GenericDatumWriter, GenericDatumReader, GenericRecord}
import org.apache.avro.io._
import org.apache.commons.io.output.ByteArrayOutputStream
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.{HColumnDescriptor, HTableDescriptor, TableName, HBaseConfiguration}
import org.apache.hadoop.hbase.client.{HBaseAdmin, Put, ConnectionFactory, Table}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types._
import org.apache.spark.sql.{SaveMode, DataFrame, Row, SQLContext}
import org.apache.spark.sql.sources._
import org.json4s.JsonAST.JValue
import org.json4s.jackson.JsonMethods._
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.util.control.NonFatal
/**
* val people = sqlContext.read.format("hbase").load("people")
*/
private[sql] class DefaultSource extends RelationProvider with CreatableRelationProvider {//with DataSourceRegister {
//override def shortName(): String = "hbase"
override def createRelation(
sqlContext: SQLContext,
parameters: Map[String, String]): BaseRelation = {
HBaseRelation(parameters, None)(sqlContext)
}
override def createRelation(
sqlContext: SQLContext,
mode: SaveMode,
parameters: Map[String, String],
data: DataFrame): BaseRelation = {
val relation = HBaseRelation(parameters, Some(data.schema))(sqlContext)
relation.createTable()
relation.insert(data, false)
relation
}
}
case class HBaseRelation(
parameters: Map[String, String],
userSpecifiedschema: Option[StructType]
)(@transient val sqlContext: SQLContext)
extends BaseRelation with PrunedFilteredScan with InsertableRelation with Logging {
val timestamp = parameters.get(HBaseRelation.TIMESTAMP).map(_.toLong)
val minStamp = parameters.get(HBaseRelation.MIN_STAMP).map(_.toLong)
val maxStamp = parameters.get(HBaseRelation.MAX_STAMP).map(_.toLong)
val maxVersions = parameters.get(HBaseRelation.MAX_VERSIONS).map(_.toInt)
def createTable() {
if (catalog.numReg > 3) {
val tName = TableName.valueOf(catalog.name)
val cfs = catalog.getColumnFamilies
val connection = ConnectionFactory.createConnection(hbaseConf)
// Initialize hBase table if necessary
val admin = connection.getAdmin()
if (!admin.isTableAvailable(tName)) {
val tableDesc = new HTableDescriptor(tName)
cfs.foreach { x =>
val cf = new HColumnDescriptor(x.getBytes())
logDebug(s"add family $x to ${catalog.name}")
tableDesc.addFamily(cf)
}
val startKey = Bytes.toBytes("aaaaaaa");
val endKey = Bytes.toBytes("zzzzzzz");
val splitKeys = Bytes.split(startKey, endKey, catalog.numReg - 3);
admin.createTable(tableDesc, splitKeys)
val r = connection.getRegionLocator(TableName.valueOf(catalog.name)).getAllRegionLocations
while(r == null || r.size() == 0) {
logDebug(s"region not allocated")
Thread.sleep(1000)
}
logDebug(s"region allocated $r")
}
admin.close()
connection.close()
}
}
/**
*
* @param data
* @param overwrite
*/
override def insert(data: DataFrame, overwrite: Boolean): Unit = {
val jobConfig: JobConf = new JobConf(hbaseConf, this.getClass)
jobConfig.setOutputFormat(classOf[TableOutputFormat])
jobConfig.set(TableOutputFormat.OUTPUT_TABLE, catalog.name)
var count = 0
val rkFields = catalog.getRowKey
val rkIdxedFields = rkFields.map{ case x =>
(schema.fieldIndex(x.colName), x)
}
val colsIdxedFields = schema
.fieldNames
.partition( x => rkFields.map(_.colName).contains(x))
._2.map(x => (schema.fieldIndex(x), catalog.getField(x)))
val rdd = data.rdd //df.queryExecution.toRdd
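    // Builds an HBase Put per Row: the row key is the concatenation of the encoded bytes of
    // all row-key columns in catalog order, and every remaining projected column is written
    // as a cell under its column family/qualifier.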
def convertToPut(row: Row) = {
// construct bytes for row key
val rowBytes = rkIdxedFields.map { case (x, y) =>
Utils.toBytes(row(x), y)
}
val rLen = rowBytes.foldLeft(0) { case (x, y) =>
x + y.length
}
val rBytes = new Array[Byte](rLen)
var offset = 0
rowBytes.foreach { x =>
System.arraycopy(x, 0, rBytes, offset, x.length)
offset += x.length
}
val put = timestamp.fold(new Put(rBytes))(new Put(rBytes, _))
colsIdxedFields.foreach { case (x, y) =>
val b = Utils.toBytes(row(x), y)
put.addColumn(Bytes.toBytes(y.cf), Bytes.toBytes(y.col), b)
}
count += 1
(new ImmutableBytesWritable, put)
}
rdd.map(convertToPut(_)).saveAsHadoopDataset(jobConfig)
}
val catalog = HBaseTableCatalog(parameters)
val df: DataFrame = null
val testConf = sqlContext.sparkContext.conf.getBoolean(SparkHBaseConf.testConf, false)
@transient val hConf = {
if (testConf) {
SparkHBaseConf.conf
} else {
HBaseConfiguration.create
}
}
val wrappedConf = sqlContext.sparkContext.broadcast(new SerializableConfiguration(hConf))
def hbaseConf = wrappedConf.value.value
def rows = catalog.row
def singleKey = {
rows.fields.size == 1
}
def getField(name: String): Field = {
catalog.getField(name)
}
// check whether the column is the first key in the rowkey
def isPrimaryKey(c: String): Boolean = {
val f1 = catalog.getRowKey(0)
val f2 = getField(c)
f1 == f2
}
def isComposite(): Boolean = {
catalog.getRowKey.size > 1
}
def isColumn(c: String): Boolean = {
!catalog.getRowKey.map(_.colName).contains(c)
}
// Return the key that can be used as partition keys, which satisfying two conditions:
// 1: it has to be the row key
// 2: it has to be sequentially sorted without gap in the row key
def getRowColumns(c: Seq[Field]): Seq[Field] = {
catalog.getRowKey.zipWithIndex.filter { x =>
c.contains(x._1)
}.zipWithIndex.filter { x =>
x._1._2 == x._2
}.map(_._1._1)
}
def getIndexedProjections(requiredColumns: Array[String]): Seq[(Field, Int)] = {
requiredColumns.map(catalog.sMap.getField(_)).zipWithIndex
}
// Retrieve all columns we will return in the scanner
def splitRowKeyColumns(requiredColumns: Array[String]): (Seq[Field], Seq[Field]) = {
val (l, r) = requiredColumns.map(catalog.sMap.getField(_)).partition(_.cf == HBaseTableCatalog.rowKey)
(l, r)
}
override val schema: StructType = userSpecifiedschema.getOrElse(catalog.toDataType)
def buildScan(requiredColumns: Array[String], filters: Array[Filter]): RDD[Row] = {
new HBaseTableScanRDD(this, requiredColumns, filters)
}
}
class SerializableConfiguration(@transient var value: Configuration) extends Serializable {
private def writeObject(out: ObjectOutputStream): Unit = tryOrIOException {
out.defaultWriteObject()
value.write(out)
}
private def readObject(in: ObjectInputStream): Unit = tryOrIOException {
value = new Configuration(false)
value.readFields(in)
}
def tryOrIOException(block: => Unit) {
try {
block
} catch {
case e: IOException => throw e
case NonFatal(t) => throw new IOException(t)
}
}
}
object HBaseRelation {
val TIMESTAMP = "timestamp"
val MIN_STAMP = "minStamp"
val MAX_STAMP = "maxStamp"
val MAX_VERSIONS = "maxVersions"
}
| zhzhan/shc | src/main/scala/org/apache/spark/sql/execution/datasources/hbase/HBaseRelation.scala | Scala | apache-2.0 | 8,541 |
package aio
package conduit
import buffer.ByteResult
/**
*
*/
trait ByteResultSourceConduit
extends SourceConduit[ByteResult]
/**
*
*/
trait ByteResultSinkConduit
extends SinkConduit[ByteResult]
| weltermann17/pleasant-scala | aio/src/main/scala/aio/conduit/ByteResultConduit.scala | Scala | apache-2.0 | 208 |
package com.sksamuel.elastic4s.requests.indexes
import com.sksamuel.elastic4s.ext.OptionImplicits._
import com.sksamuel.elastic4s.requests.searches.queries.Query
case class TemplateAlias(name: String, filter: Option[Query] = None, routing: Option[String] = None) {
def filter(filter: Query): TemplateAlias = copy(filter = filter.some)
def routing(routing: String): TemplateAlias = copy(routing = routing.some)
}
| sksamuel/elastic4s | elastic4s-domain/src/main/scala/com/sksamuel/elastic4s/requests/indexes/TemplateAlias.scala | Scala | apache-2.0 | 418 |