code | repo_name | path | language | license | size
---|---|---|---|---|---|
package com.geishatokyo.sqlgen.process.save.xls
import java.io.FileOutputStream
import com.geishatokyo.sqlgen.process.{Context, OutputSupport, Proc}
/**
* Created by takezoux2 on 2017/07/05.
*/
class XLSSaveProc(dir: String) extends Proc with XLSConverter with OutputSupport {
def isXlsx = true
def extension = if(isXlsx) ".xlsx" else ".xls"
override def apply(c: Context): Context = {
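// Convert the in-memory workbook via the XLSConverter mixin, resolve the
// output path under `dir`, and write the file; the stream is closed in the
// finally block even if the write fails.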
val xlsWb = toHSSFSheet(c.workbook, isXlsx)
val path = getPath(c, dir, c.workbook.name + extension)
val output = new FileOutputStream(path)
try {
xlsWb.write(output)
} finally {
output.close()
}
c
}
}
| geishatokyo/sql-generator | src/main/scala/com/geishatokyo/sqlgen/process/save/xls/XLSSaveProc.scala | Scala | mit | 643 |
package testfeature
import feature._
import org.scalatest.FunSuite
/**
* Created by prussell on 11/12/16.
*/
class MessengerRNAMiscSuite extends FunSuite {
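// Fixtures such as chr1_1000_2000_plus are defined elsewhere in the test
// package; their names appear to encode chromosome, block coordinates and
// strand (plus/minus/both).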
test("Illegal orientation") {
intercept[IllegalArgumentException](MessengerRNA(chr1_1000_2000_both, 1000, 1012, Some("name"), Some("gene")))
}
test("Illegal names") {
intercept[IllegalArgumentException](MessengerRNA(chr1_1000_2000_plus_1, 1000, 1012, Some(""), Some("gene")))
intercept[IllegalArgumentException](MessengerRNA(chr1_1000_2000_plus_1, 1000, 1012, Some("name"), Some("")))
}
test("Illegal CDS") {
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_plus, 110, 115, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_plus, 199, 301, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_plus, 99, 150, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_plus, 150, 140, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_plus, 300, 401, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_plus, 250, 300, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_plus, 150, 401, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_plus, 50, 60, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_plus, 450, 460, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_plus, 150, 150, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_plus, 250, 260, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_plus, 299, 310, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_plus, 150, 201, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_plus, 200, 350, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_plus, 200, 300, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_plus, 150, 300, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_plus, 150, 160, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_plus, 190, 310, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_minus, 110, 115, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_minus, 199, 301, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_minus, 99, 150, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_minus, 150, 140, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_minus, 300, 401, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_minus, 250, 300, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_minus, 150, 401, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_minus, 50, 60, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_minus, 450, 460, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_minus, 150, 150, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_minus, 250, 260, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_minus, 299, 310, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_minus, 150, 201, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_minus, 200, 350, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_minus, 200, 300, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_minus, 150, 300, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_minus, 150, 160, Some("name"), Some("gene")) }
intercept[IllegalArgumentException]{ MessengerRNA(chr1_100_200_300_400_minus, 190, 310, Some("name"), Some("gene")) }
}
test("getCDS") {
// mRNA with one block
// Strictly contained
assert(MessengerRNA(chr1_1000_2000_minus, 1500, 1599, Some("name"), Some("gene"))
.getCDS === Block("1", 1500, 1599, Minus))
assert(MessengerRNA(chr1_1000_2000_plus, 1500, 1599, Some("name"), Some("gene"))
.getCDS === Block("1", 1500, 1599, Plus))
// CDS is whole mRNA
assert(MessengerRNA(Block("1", 1000, 1999, Minus), 1000, 1999, Some("name"), Some("gene"))
.getCDS === Block("1", 1000, 1999, Minus))
assert(MessengerRNA(Block("1", 1000, 1999, Plus), 1000, 1999, Some("name"), Some("gene"))
.getCDS === Block("1", 1000, 1999, Plus))
// mRNA and CDS share one endpoint
assert(MessengerRNA(chr1_1000_2000_minus, 1000, 1300, Some("name"), Some("gene"))
.getCDS === Block("1", 1000, 1300, Minus))
assert(MessengerRNA(chr1_1000_2000_minus, 1700, 2000, Some("name"), Some("gene"))
.getCDS === Block("1", 1700, 2000, Minus))
assert(MessengerRNA(chr1_1000_2000_plus, 1000, 1300, Some("name"), Some("gene"))
.getCDS === Block("1", 1000, 1300, Plus))
assert(MessengerRNA(chr1_1000_2000_plus, 1700, 2000, Some("name"), Some("gene"))
.getCDS === Block("1", 1700, 2000, Plus))
// mRNA with two blocks
// Strictly contained in one exon
assert(MessengerRNA(chr1_100_200_300_400_plus, 110, 122, Some("name"), Some("gene")).getCDS
=== Block("1", 110, 122, Plus))
assert(MessengerRNA(chr1_100_200_300_400_minus, 310, 322, Some("name"), Some("gene")).getCDS
=== Block("1", 310, 322, Minus))
// Strictly contained in two exons
assert(MessengerRNA(chr1_100_200_300_400_plus, 150, 349, Some("name"), Some("gene")).getCDS
=== BlockSet(List(Block("1", 150, 200, Plus), Block("1", 300, 349, Plus))))
// CDS equal to one exon
assert(MessengerRNA(BlockSet(List(Block("1", 100, 199, Plus), Block("1", 300, 400, Plus))),
100, 199, Some("name"), Some("gene")).getCDS
=== Block("1", 100, 199, Plus))
assert(MessengerRNA(BlockSet(List(Block("1", 100, 200, Plus), Block("1", 300, 399, Plus))),
300, 399, Some("name"), Some("gene")).getCDS
=== Block("1", 300, 399, Plus))
// CDS equal to entire mRNA
assert(MessengerRNA(BlockSet(List(Block("1", 100, 199, Plus), Block("1", 300, 399, Plus))),
100, 399, Some("name"), Some("gene")).getCDS
=== BlockSet(List(Block("1", 100, 199, Plus), Block("1", 300, 399, Plus))))
// CDS sharing an endpoint with mRNA, contained in one exon
assert(MessengerRNA(chr1_100_200_300_400_minus, 100, 139, Some("name"), Some("gene")).getCDS
=== Block("1", 100, 139, Minus))
assert(MessengerRNA(chr1_100_200_300_400_plus, 370, 400, Some("name"), Some("gene")).getCDS
=== Block("1", 370, 400, Plus))
// CDS sharing an endpoint with mRNA, contained in two exons
assert(MessengerRNA(chr1_100_200_300_400_plus, 180, 400, Some("name"), Some("gene")).getCDS
=== BlockSet(List(Block("1", 180, 200, Plus), Block("1", 300, 400, Plus))))
assert(MessengerRNA(chr1_100_200_300_400_plus, 100, 350, Some("name"), Some("gene")).getCDS
=== BlockSet(List(Block("1", 100, 200, Plus), Block("1", 300, 350, Plus))))
// CDS sharing an inner endpoint with one exon, contained in that exon
assert(MessengerRNA(chr1_100_200_300_400_plus, 170, 200, Some("name"), Some("gene")).getCDS
=== Block("1", 170, 200, Plus))
assert(MessengerRNA(chr1_100_200_300_400_plus, 300, 330, Some("name"), Some("gene")).getCDS
=== Block("1", 300, 330, Plus))
// mRNA with four blocks
// Strictly contained in one exon
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 1500, 1599, Some("name"), Some("gene")).getCDS
=== Block("1", 1500, 1599, Minus))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 3500, 3599, Some("name"), Some("gene")).getCDS
=== Block("1", 3500, 3599, Plus))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 5500, 5599, Some("name"), Some("gene")).getCDS
=== Block("1", 5500, 5599, Minus))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 7500, 7599, Some("name"), Some("gene")).getCDS
=== Block("1", 7500, 7599, Plus))
// Strictly contained in two exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1500, 3499, Some("name"), Some("gene")).getCDS
=== BlockSet(List(Block("1", 1500, 2000, Plus), Block("1", 3000, 3499, Plus))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3500, 5499, Some("name"), Some("gene")).getCDS
=== BlockSet(List(Block("1", 3500, 4000, Minus), Block("1", 5000, 5499, Minus))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 5500, 7499, Some("name"), Some("gene")).getCDS
=== BlockSet(List(Block("1", 5500, 6000, Plus), Block("1", 7000, 7499, Plus))))
// Strictly contained in three exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1500, 5501, Some("name"), Some("gene")).getCDS
=== BlockSet(List(Block("1", 1500, 2000, Plus),
Block("1", 3000, 4000, Plus),
Block("1", 5000, 5501, Plus))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3500, 7501, Some("name"), Some("gene")).getCDS
=== BlockSet(List(Block("1", 3500, 4000, Minus),
Block("1", 5000, 6000, Minus),
Block("1", 7000, 7501, Minus))))
// Strictly contained in four exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1500, 7500, Some("name"), Some("gene")).getCDS
=== BlockSet(List(
Block("1", 1500, 2000, Plus),
Block("1", 3000, 4000, Plus),
Block("1", 5000, 6000, Plus),
Block("1", 7000, 7500, Plus))))
// CDS equal to three exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1000, 6000, Some("name"), Some("gene")).getCDS
=== BlockSet(List(
Block("1", 1000, 2000, Plus),
Block("1", 3000, 4000, Plus),
Block("1", 5000, 6000, Plus))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3000, 8000, Some("name"), Some("gene")).getCDS
=== BlockSet(List(
Block("1", 3000, 4000, Minus),
Block("1", 5000, 6000, Minus),
Block("1", 7000, 8000, Minus))))
// CDS sharing an endpoint with mRNA, contained in one exon
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 1000, 1600, Some("name"), Some("gene")).getCDS
=== Block("1", 1000, 1600, Minus))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 7400, 8000, Some("name"), Some("gene")).getCDS
=== Block("1", 7400, 8000, Plus))
// CDS sharing an endpoint with mRNA, contained in two exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1000, 3500, Some("name"), Some("gene")).getCDS
=== BlockSet(List(
Block("1", 1000, 2000, Plus),
Block("1", 3000, 3500, Plus))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 5500, 8000, Some("name"), Some("gene")).getCDS
=== BlockSet(List(
Block("1", 5500, 6000, Minus),
Block("1", 7000, 8000, Minus))))
// CDS sharing an endpoint with mRNA, contained in three exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1000, 5400, Some("name"), Some("gene")).getCDS
=== BlockSet(List(
Block("1", 1000, 2000, Plus),
Block("1", 3000, 4000, Plus),
Block("1", 5000, 5400, Plus))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3600, 8000, Some("name"), Some("gene")).getCDS
=== BlockSet(List(
Block("1", 3600, 4000, Minus),
Block("1", 5000, 6000, Minus),
Block("1", 7000, 8000, Minus))))
// CDS sharing an endpoint with mRNA, contained in four exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1000, 7600, Some("name"), Some("gene")).getCDS
=== BlockSet(List(
Block("1", 1000, 2000, Plus),
Block("1", 3000, 4000, Plus),
Block("1", 5000, 6000, Plus),
Block("1", 7000, 7600, Plus))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 1400, 8000, Some("name"), Some("gene")).getCDS
=== BlockSet(List(
Block("1", 1400, 2000, Minus),
Block("1", 3000, 4000, Minus),
Block("1", 5000, 6000, Minus),
Block("1", 7000, 8000, Minus))))
// CDS sharing an inner endpoint with one exon, contained in that exon
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1400, 2000, Some("name"), Some("gene")).getCDS
=== Block("1", 1400, 2000, Plus))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3000, 3600, Some("name"), Some("gene")).getCDS
=== Block("1", 3000, 3600, Minus))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 5400, 6000, Some("name"), Some("gene")).getCDS
=== Block("1", 5400, 6000, Plus))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 7000, 7600, Some("name"), Some("gene")).getCDS
=== Block("1", 7000, 7600, Minus))
// CDS sharing an inner endpoint with one exon, contained in two exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1500, 4000, Some("name"), Some("gene")).getCDS
=== BlockSet(List(
Block("1", 1500, 2000, Plus),
Block("1", 3000, 4000, Plus))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3500, 6000, Some("name"), Some("gene")).getCDS
=== BlockSet(List(
Block("1", 3500, 4000, Minus),
Block("1", 5000, 6000, Minus))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 3000, 5500, Some("name"), Some("gene")).getCDS
=== BlockSet(List(
Block("1", 3000, 4000, Plus),
Block("1", 5000, 5500, Plus))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 5000, 7500, Some("name"), Some("gene")).getCDS
=== BlockSet(List(
Block("1", 5000, 6000, Minus),
Block("1", 7000, 7500, Minus))))
// CDS sharing an inner endpoint with one exon, contained in three exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1600, 6000, Some("name"), Some("gene")).getCDS
=== BlockSet(List(
Block("1", 1600, 2000, Plus),
Block("1", 3000, 4000, Plus),
Block("1", 5000, 6000, Plus))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 3000, 7400, Some("name"), Some("gene")).getCDS
=== BlockSet(List(
Block("1", 3000, 4000, Plus),
Block("1", 5000, 6000, Plus),
Block("1", 7000, 7400, Plus))))
}
test("get 3'-UTR") {
// mRNA with one block
// Strictly contained
assert(MessengerRNA(chr1_1000_2000_minus, 1500, 1599, Some("name"), Some("gene"))
.get3UTR === Some(Block("1", 1000, 1500, Minus)))
assert(MessengerRNA(chr1_1000_2000_plus, 1500, 1599, Some("name"), Some("gene"))
.get3UTR === Some(Block("1", 1599, 2000, Plus)))
// CDS is whole mRNA
assert(MessengerRNA(Block("1", 1000, 1999, Minus), 1000, 1999, Some("name"), Some("gene"))
.get3UTR === None)
assert(MessengerRNA(Block("1", 1000, 1999, Plus), 1000, 1999, Some("name"), Some("gene"))
.get3UTR === None)
// mRNA and CDS share one endpoint
assert(MessengerRNA(chr1_1000_2000_minus, 1000, 1300, Some("name"), Some("gene"))
.get3UTR === None)
assert(MessengerRNA(chr1_1000_2000_minus, 1700, 2000, Some("name"), Some("gene"))
.get3UTR === Some(Block("1", 1000, 1700, Minus)))
assert(MessengerRNA(chr1_1000_2000_plus, 1000, 1300, Some("name"), Some("gene"))
.get3UTR === Some(Block("1", 1300, 2000, Plus)))
assert(MessengerRNA(chr1_1000_2000_plus, 1700, 2000, Some("name"), Some("gene"))
.get3UTR === None)
// mRNA with two blocks
// Strictly contained in one exon
assert(MessengerRNA(chr1_100_200_300_400_plus, 110, 122, Some("name"), Some("gene")).get3UTR
=== Some(BlockSet(List(
Block("1", 122, 200, Plus),
Block("1", 300, 400, Plus)))))
assert(MessengerRNA(chr1_100_200_300_400_minus, 310, 322, Some("name"), Some("gene")).get3UTR
=== Some(BlockSet(List(
Block("1", 100, 200, Minus),
Block("1", 300, 310, Minus)))))
// Strictly contained in two exons
assert(MessengerRNA(chr1_100_200_300_400_plus, 150, 349, Some("name"), Some("gene")).get3UTR
=== Some(Block("1", 349, 400, Plus)))
// CDS equal to one exon
assert(MessengerRNA(BlockSet(List(Block("1", 100, 199, Plus), Block("1", 300, 400, Plus))),
100, 199, Some("name"), Some("gene")).get3UTR
=== Some(Block("1", 300, 400, Plus)))
assert(MessengerRNA(BlockSet(List(Block("1", 100, 200, Plus), Block("1", 300, 399, Plus))),
300, 399, Some("name"), Some("gene")).get3UTR
=== None)
// CDS equal to entire mRNA
assert(MessengerRNA(BlockSet(List(Block("1", 100, 199, Plus), Block("1", 300, 399, Plus))),
100, 399, Some("name"), Some("gene")).get3UTR
=== None)
// CDS sharing an endpoint with mRNA, contained in one exon
assert(MessengerRNA(chr1_100_200_300_400_minus, 100, 139, Some("name"), Some("gene")).get3UTR
=== None)
assert(MessengerRNA(chr1_100_200_300_400_plus, 370, 400, Some("name"), Some("gene")).get3UTR
=== None)
// CDS sharing an endpoint with mRNA, contained in two exons
assert(MessengerRNA(chr1_100_200_300_400_plus, 180, 400, Some("name"), Some("gene")).get3UTR
=== None)
assert(MessengerRNA(chr1_100_200_300_400_plus, 100, 350, Some("name"), Some("gene")).get3UTR
=== Some(Block("1", 350, 400, Plus)))
// CDS sharing an inner endpoint with one exon, contained in that exon
assert(MessengerRNA(chr1_100_200_300_400_plus, 170, 200, Some("name"), Some("gene")).get3UTR
=== Some(Block("1", 300, 400, Plus)))
assert(MessengerRNA(chr1_100_200_300_400_plus, 300, 330, Some("name"), Some("gene")).get3UTR
=== Some(Block("1", 330, 400, Plus)))
// mRNA with four blocks
// Strictly contained in one exon
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 1500, 1599, Some("name"), Some("gene")).get3UTR
=== Some(Block("1", 1000, 1500, Minus)))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 3500, 3599, Some("name"), Some("gene")).get3UTR
=== Some(BlockSet(List(
Block("1", 3599, 4000, Plus),
Block("1", 5000, 6000, Plus),
Block("1", 7000, 8000, Plus)))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 5500, 5599, Some("name"), Some("gene")).get3UTR
=== Some(BlockSet(List(
Block("1", 1000, 2000, Minus),
Block("1", 3000, 4000, Minus),
Block("1", 5000, 5500, Minus)))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 7500, 7599, Some("name"), Some("gene")).get3UTR
=== Some(Block("1", 7599, 8000, Plus)))
// Strictly contained in two exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1500, 3499, Some("name"), Some("gene")).get3UTR
=== Some(BlockSet(List(
Block("1", 3499, 4000, Plus),
Block("1", 5000, 6000, Plus),
Block("1", 7000, 8000, Plus)))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3500, 5499, Some("name"), Some("gene")).get3UTR
=== Some(BlockSet(List(
Block("1", 1000, 2000, Minus),
Block("1", 3000, 3500, Minus)))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 5500, 7499, Some("name"), Some("gene")).get3UTR
=== Some(Block("1", 7499, 8000, Plus)))
// Strictly contained in three exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1500, 5501, Some("name"), Some("gene")).get3UTR
=== Some(BlockSet(List(
Block("1", 5501, 6000, Plus),
Block("1", 7000, 8000, Plus)))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3500, 7501, Some("name"), Some("gene")).get3UTR
=== Some(BlockSet(List(
Block("1", 1000, 2000, Minus),
Block("1", 3000, 3500, Minus)))))
// Strictly contained in four exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1500, 7500, Some("name"), Some("gene")).get3UTR
=== Some(Block("1", 7500, 8000, Plus)))
// CDS equal to three exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1000, 6000, Some("name"), Some("gene")).get3UTR
=== Some(Block("1", 7000, 8000, Plus)))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3000, 8000, Some("name"), Some("gene")).get3UTR
=== Some(Block("1", 1000, 2000, Minus)))
// CDS sharing an endpoint with mRNA, contained in one exon
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 1000, 1600, Some("name"), Some("gene")).get3UTR
=== None)
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 7400, 8000, Some("name"), Some("gene")).get3UTR
=== None)
// CDS sharing an endpoint with mRNA, contained in two exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1000, 3500, Some("name"), Some("gene")).get3UTR
=== Some(BlockSet(List(
Block("1", 3500, 4000, Plus),
Block("1", 5000, 6000, Plus),
Block("1", 7000, 8000, Plus)))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 5500, 8000, Some("name"), Some("gene")).get3UTR
=== Some(BlockSet(List(
Block("1", 1000, 2000, Minus),
Block("1", 3000, 4000, Minus),
Block("1", 5000, 5500, Minus)))))
// CDS sharing an endpoint with mRNA, contained in three exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1000, 5400, Some("name"), Some("gene")).get3UTR
=== Some(BlockSet(List(
Block("1", 5400, 6000, Plus),
Block("1", 7000, 8000, Plus)))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3600, 8000, Some("name"), Some("gene")).get3UTR
=== Some(BlockSet(List(
Block("1", 1000, 2000, Minus),
Block("1", 3000, 3600, Minus)))))
// CDS sharing an endpoint with mRNA, contained in four exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1000, 7600, Some("name"), Some("gene")).get3UTR
=== Some(Block("1", 7600, 8000, Plus)))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 1400, 8000, Some("name"), Some("gene")).get3UTR
=== Some(Block("1", 1000, 1400, Minus)))
// CDS sharing an inner endpoint with one exon, contained in that exon
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1400, 2000, Some("name"), Some("gene")).get3UTR
=== Some(BlockSet(List(
Block("1", 3000, 4000, Plus),
Block("1", 5000, 6000, Plus),
Block("1", 7000, 8000, Plus)))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3000, 3600, Some("name"), Some("gene")).get3UTR
=== Some(Block("1", 1000, 2000, Minus)))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 5400, 6000, Some("name"), Some("gene")).get3UTR
=== Some(Block("1", 7000, 8000, Plus)))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 7000, 7600, Some("name"), Some("gene")).get3UTR
=== Some(BlockSet(List(
Block("1", 1000, 2000, Minus),
Block("1", 3000, 4000, Minus),
Block("1", 5000, 6000, Minus)))))
// CDS sharing an inner endpoint with one exon, contained in two exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1500, 4000, Some("name"), Some("gene")).get3UTR
=== Some(BlockSet(List(
Block("1", 5000, 6000, Plus),
Block("1", 7000, 8000, Plus)))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3500, 6000, Some("name"), Some("gene")).get3UTR
=== Some(BlockSet(List(
Block("1", 1000, 2000, Minus),
Block("1", 3000, 3500, Minus)))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 3000, 5500, Some("name"), Some("gene")).get3UTR
=== Some(BlockSet(List(
Block("1", 5500, 6000, Plus),
Block("1", 7000, 8000, Plus)))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 5000, 7500, Some("name"), Some("gene")).get3UTR
=== Some(BlockSet(List(
Block("1", 1000, 2000, Minus),
Block("1", 3000, 4000, Minus)))))
// CDS sharing an inner endpoint with one exon, contained in three exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1600, 6000, Some("name"), Some("gene")).get3UTR
=== Some(Block("1", 7000, 8000, Plus)))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 3000, 7400, Some("name"), Some("gene")).get3UTR
=== Some(Block("1", 7400, 8000, Plus)))
}
test("get 5’-UTR") {
// mRNA with one block
// Strictly contained
assert(MessengerRNA(chr1_1000_2000_minus, 1500, 1599, Some("name"), Some("gene"))
.get5UTR === Some(Block("1", 1599, 2000, Minus)))
assert(MessengerRNA(chr1_1000_2000_plus, 1500, 1599, Some("name"), Some("gene"))
.get5UTR === Some(Block("1", 1000, 1500, Plus)))
// CDS is whole mRNA
assert(MessengerRNA(Block("1", 1000, 1999, Minus), 1000, 1999, Some("name"), Some("gene"))
.get5UTR === None)
assert(MessengerRNA(Block("1", 1000, 1999, Plus), 1000, 1999, Some("name"), Some("gene"))
.get5UTR === None)
// mRNA and CDS share one endpoint
assert(MessengerRNA(chr1_1000_2000_minus, 1000, 1300, Some("name"), Some("gene"))
.get5UTR === Some(Block("1", 1300, 2000, Minus)))
assert(MessengerRNA(chr1_1000_2000_minus, 1700, 2000, Some("name"), Some("gene"))
.get5UTR === None)
assert(MessengerRNA(chr1_1000_2000_plus, 1000, 1300, Some("name"), Some("gene"))
.get5UTR === None)
assert(MessengerRNA(chr1_1000_2000_plus, 1700, 2000, Some("name"), Some("gene"))
.get5UTR === Some(Block("1", 1000, 1700, Plus)))
// mRNA with two blocks
// Strictly contained in one exon
assert(MessengerRNA(chr1_100_200_300_400_plus, 110, 122, Some("name"), Some("gene")).get5UTR
=== Some(Block("1", 100, 110, Plus)))
assert(MessengerRNA(chr1_100_200_300_400_minus, 310, 322, Some("name"), Some("gene")).get5UTR
=== Some(Block("1", 322, 400, Minus)))
// Strictly contained in two exons
assert(MessengerRNA(chr1_100_200_300_400_plus, 150, 349, Some("name"), Some("gene")).get5UTR
=== Some(Block("1", 100, 150, Plus)))
// CDS equal to one exon
assert(MessengerRNA(BlockSet(List(Block("1", 100, 199, Plus), Block("1", 300, 400, Plus))),
100, 199, Some("name"), Some("gene")).get5UTR
=== None)
assert(MessengerRNA(BlockSet(List(Block("1", 100, 200, Plus), Block("1", 300, 399, Plus))),
300, 399, Some("name"), Some("gene")).get5UTR
=== Some(Block("1", 100, 200, Plus)))
// CDS equal to entire mRNA
assert(MessengerRNA(BlockSet(List(Block("1", 100, 199, Plus), Block("1", 300, 399, Plus))),
100, 399, Some("name"), Some("gene")).get5UTR
=== None)
// CDS sharing an endpoint with mRNA, contained in one exon
assert(MessengerRNA(chr1_100_200_300_400_minus, 100, 139, Some("name"), Some("gene")).get5UTR
=== Some(BlockSet(List(
Block("1", 139, 200, Minus),
Block("1", 300, 400, Minus)))))
assert(MessengerRNA(chr1_100_200_300_400_plus, 370, 400, Some("name"), Some("gene")).get5UTR
=== Some(BlockSet(List(
Block("1", 100, 200, Plus),
Block("1", 300, 370, Plus)))))
// CDS sharing an endpoint with mRNA, contained in two exons
assert(MessengerRNA(chr1_100_200_300_400_plus, 180, 400, Some("name"), Some("gene")).get5UTR
=== Some(Block("1", 100, 180, Plus)))
assert(MessengerRNA(chr1_100_200_300_400_plus, 100, 350, Some("name"), Some("gene")).get5UTR
=== None)
// CDS sharing an inner endpoint with one exon, contained in that exon
assert(MessengerRNA(chr1_100_200_300_400_plus, 170, 200, Some("name"), Some("gene")).get5UTR
=== Some(Block("1", 100, 170, Plus)))
assert(MessengerRNA(chr1_100_200_300_400_plus, 300, 330, Some("name"), Some("gene")).get5UTR
=== Some(Block("1", 100, 200, Plus)))
// mRNA with four blocks
// Strictly contained in one exon
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 1500, 1599, Some("name"), Some("gene")).get5UTR
=== Some(BlockSet(List(
Block("1", 1599, 2000, Minus),
Block("1", 3000, 4000, Minus),
Block("1", 5000, 6000, Minus),
Block("1", 7000, 8000, Minus)))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 3500, 3599, Some("name"), Some("gene")).get5UTR
=== Some(BlockSet(List(
Block("1", 1000, 2000, Plus),
Block("1", 3000, 3500, Plus)))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 5500, 5599, Some("name"), Some("gene")).get5UTR
=== Some(BlockSet(List(
Block("1", 5599, 6000, Minus),
Block("1", 7000, 8000, Minus)))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 7500, 7599, Some("name"), Some("gene")).get5UTR
=== Some(BlockSet(List(
Block("1", 1000, 2000, Plus),
Block("1", 3000, 4000, Plus),
Block("1", 5000, 6000, Plus),
Block("1", 7000, 7500, Plus)))))
// Strictly contained in two exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1500, 3499, Some("name"), Some("gene")).get5UTR
=== Some(Block("1", 1000, 1500, Plus)))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3500, 5499, Some("name"), Some("gene")).get5UTR
=== Some(BlockSet(List(
Block("1", 5499, 6000, Minus),
Block("1", 7000, 8000, Minus)))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 5500, 7499, Some("name"), Some("gene")).get5UTR
=== Some(BlockSet(List(
Block("1", 1000, 2000, Plus),
Block("1", 3000, 4000, Plus),
Block("1", 5000, 5500, Plus)))))
// Strictly contained in three exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1500, 5501, Some("name"), Some("gene")).get5UTR
=== Some(Block("1", 1000, 1500, Plus)))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3500, 7501, Some("name"), Some("gene")).get5UTR
=== Some(Block("1", 7501, 8000, Minus)))
// Strictly contained in four exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1500, 7500, Some("name"), Some("gene")).get5UTR
=== Some(Block("1", 1000, 1500, Plus)))
// CDS equal to three exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1000, 6000, Some("name"), Some("gene")).get5UTR
=== None)
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3000, 8000, Some("name"), Some("gene")).get5UTR
=== None)
// CDS sharing an endpoint with mRNA, contained in one exon
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 1000, 1600, Some("name"), Some("gene")).get5UTR
=== Some(BlockSet(List(
Block("1", 1600, 2000, Minus),
Block("1", 3000, 4000, Minus),
Block("1", 5000, 6000, Minus),
Block("1", 7000, 8000, Minus)))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 7400, 8000, Some("name"), Some("gene")).get5UTR
=== Some(BlockSet(List(
Block("1", 1000, 2000, Plus),
Block("1", 3000, 4000, Plus),
Block("1", 5000, 6000, Plus),
Block("1", 7000, 7400, Plus)))))
// CDS sharing an endpoint with mRNA, contained in two exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1000, 3500, Some("name"), Some("gene")).get5UTR
=== None)
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 5500, 8000, Some("name"), Some("gene")).get5UTR
=== None)
// CDS sharing an endpoint with mRNA, contained in three exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1000, 5400, Some("name"), Some("gene")).get5UTR
=== None)
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3600, 8000, Some("name"), Some("gene")).get5UTR
=== None)
// CDS sharing an endpoint with mRNA, contained in four exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1000, 7600, Some("name"), Some("gene")).get5UTR
=== None)
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 1400, 8000, Some("name"), Some("gene")).get5UTR
=== None)
// CDS sharing an inner endpoint with one exon, contained in that exon
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1400, 2000, Some("name"), Some("gene")).get5UTR
=== Some(Block("1", 1000, 1400, Plus)))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3000, 3600, Some("name"), Some("gene")).get5UTR
=== Some(BlockSet(List(
Block("1", 3600, 4000, Minus),
Block("1", 5000, 6000, Minus),
Block("1", 7000, 8000, Minus)))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 5400, 6000, Some("name"), Some("gene")).get5UTR
=== Some(BlockSet(List(
Block("1", 1000, 2000, Plus),
Block("1", 3000, 4000, Plus),
Block("1", 5000, 5400, Plus)))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 7000, 7600, Some("name"), Some("gene")).get5UTR
=== Some(Block("1", 7600, 8000, Minus)))
// CDS sharing an inner endpoint with one exon, contained in two exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1500, 4000, Some("name"), Some("gene")).get5UTR
=== Some(Block("1", 1000, 1500, Plus)))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3500, 6000, Some("name"), Some("gene")).get5UTR
=== Some(Block("1", 7000, 8000, Minus)))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 3000, 5500, Some("name"), Some("gene")).get5UTR
=== Some(Block("1", 1000, 2000, Plus)))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 5000, 7500, Some("name"), Some("gene")).get5UTR
=== Some(Block("1", 7500, 8000, Minus)))
// CDS sharing an inner endpoint with one exon, contained in three exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1600, 6000, Some("name"), Some("gene")).get5UTR
=== Some(Block("1", 1000, 1600, Plus)))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 3000, 7400, Some("name"), Some("gene")).get5UTR
=== Some(Block("1", 1000, 2000, Plus)))
}
test("get start codon") {
// mRNA with one block
// Strictly contained
assert(MessengerRNA(chr1_1000_2000_minus, 1500, 1599, Some("name"), Some("gene"))
.getStartCodon === Block("1", 1596, 1599, Minus))
assert(MessengerRNA(chr1_1000_2000_plus, 1500, 1599, Some("name"), Some("gene"))
.getStartCodon === Block("1", 1500, 1503, Plus))
// CDS is whole mRNA
assert(MessengerRNA(Block("1", 1000, 1999, Minus), 1000, 1999, Some("name"), Some("gene"))
.getStartCodon === Block("1", 1996, 1999, Minus))
assert(MessengerRNA(Block("1", 1000, 1999, Plus), 1000, 1999, Some("name"), Some("gene"))
.getStartCodon === Block("1", 1000, 1003, Plus))
// mRNA with two blocks
// Strictly contained in one exon
assert(MessengerRNA(chr1_100_200_300_400_plus, 110, 122, Some("name"), Some("gene")).getStartCodon
=== Block("1", 110, 113, Plus))
assert(MessengerRNA(chr1_100_200_300_400_minus, 310, 322, Some("name"), Some("gene")).getStartCodon
=== Block("1", 319, 322, Minus))
// Strictly contained in two exons
assert(MessengerRNA(chr1_100_200_300_400_plus, 198, 349, Some("name"), Some("gene")).getStartCodon
=== BlockSet(List(
Block("1", 198, 200, Plus),
Block("1", 300, 301, Plus))))
// CDS sharing an endpoint with mRNA, contained in one exon
assert(MessengerRNA(chr1_100_200_300_400_minus, 100, 199, Some("name"), Some("gene")).getStartCodon
=== Block("1", 196, 199, Minus))
assert(MessengerRNA(chr1_100_200_300_400_plus, 370, 400, Some("name"), Some("gene")).getStartCodon
=== Block("1", 370, 373, Plus))
// mRNA with four blocks
// Strictly contained in two exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1500, 3499, Some("name"), Some("gene")).getStartCodon
=== Block("1", 1500, 1503, Plus))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3980, 5001, Some("name"), Some("gene")).getStartCodon
=== BlockSet(List(
Block("1", 3998, 4000, Minus),
Block("1", 5000, 5001, Minus))))
// Strictly contained in three exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1998, 5198, Some("name"), Some("gene")).getStartCodon
=== BlockSet(List(
Block("1", 1998, 2000, Plus),
Block("1", 3000, 3001, Plus))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3500, 7501, Some("name"), Some("gene")).getStartCodon
=== Block("1", 7498, 7501, Minus))
// CDS equal to three exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1000, 6000, Some("name"), Some("gene")).getStartCodon
=== Block("1", 1000, 1003, Plus))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3000, 8000, Some("name"), Some("gene")).getStartCodon
=== Block("1", 7997, 8000, Minus))
// CDS sharing an endpoint with mRNA, contained in one exon
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 1000, 1600, Some("name"), Some("gene")).getStartCodon
=== Block("1", 1597, 1600, Minus))
// CDS sharing an inner endpoint with one exon, contained in two exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1998, 4000, Some("name"), Some("gene")).getStartCodon
=== BlockSet(List(
Block("1", 1998, 2000, Plus),
Block("1", 3000, 3001, Plus))))
// Start codon spread over 3 exons
assert(MessengerRNA(BlockSet(
List(Block("1", 1000, 2000, Plus),
Block("1", 3000, 3001, Plus),
Block("1", 4000, 5000, Plus)
)), 1999, 4010, Some("name"), Some("gene")).getStartCodon === BlockSet(List(
Block("1", 1999, 2000, Plus),
Block("1", 3000, 3001, Plus),
Block("1", 4000, 4001, Plus))))
assert(MessengerRNA(BlockSet(
List(Block("1", 1000, 2000, Minus),
Block("1", 3000, 3001, Minus),
Block("1", 4000, 5000, Minus)
)), 1990, 4001, Some("name"), Some("gene")).getStartCodon === BlockSet(List(
Block("1", 1999, 2000, Minus),
Block("1", 3000, 3001, Minus),
Block("1", 4000, 4001, Minus))))
assert(MessengerRNA(BlockSet(
List(Block("1", 1000, 2000, Plus),
Block("1", 3000, 3001, Plus),
Block("1", 4000, 4001, Plus),
Block("1", 5000, 5001, Plus),
Block("1", 6000, 7000, Plus)
)), 3000, 6300, Some("name"), Some("gene")).getStartCodon === BlockSet(List(
Block("1", 3000, 3001, Plus),
Block("1", 4000, 4001, Plus),
Block("1", 5000, 5001, Plus))))
assert(MessengerRNA(BlockSet(
List(Block("1", 1000, 2000, Minus),
Block("1", 3000, 3001, Minus),
Block("1", 4000, 4001, Minus),
Block("1", 5000, 5001, Minus),
Block("1", 6000, 7000, Minus)
)), 1700, 5001, Some("name"), Some("gene")).getStartCodon === BlockSet(List(
Block("1", 3000, 3001, Minus),
Block("1", 4000, 4001, Minus),
Block("1", 5000, 5001, Minus))))
}
test("get stop codon") {
// mRNA with one block
// Strictly contained
assert(MessengerRNA(chr1_1000_2000_minus, 1500, 1599, Some("name"), Some("gene"))
.getStopCodon === Block("1", 1500, 1503, Minus))
assert(MessengerRNA(chr1_1000_2000_plus, 1500, 1599, Some("name"), Some("gene"))
.getStopCodon === Block("1", 1596, 1599, Plus))
// CDS is whole mRNA
assert(MessengerRNA(Block("1", 1000, 1999, Minus), 1000, 1999, Some("name"), Some("gene"))
.getStopCodon === Block("1", 1000, 1003, Minus))
assert(MessengerRNA(Block("1", 1000, 1999, Plus), 1000, 1999, Some("name"), Some("gene"))
.getStopCodon === Block("1", 1996, 1999, Plus))
// mRNA and CDS share one endpoint
assert(MessengerRNA(chr1_1000_2000_minus, 1000, 1300, Some("name"), Some("gene"))
.getStopCodon === Block("1", 1000, 1003, Minus))
assert(MessengerRNA(chr1_1000_2000_minus, 1700, 2000, Some("name"), Some("gene"))
.getStopCodon === Block("1", 1700, 1703, Minus))
assert(MessengerRNA(chr1_1000_2000_plus, 1000, 1300, Some("name"), Some("gene"))
.getStopCodon === Block("1", 1297, 1300, Plus))
assert(MessengerRNA(chr1_1000_2000_plus, 1700, 2000, Some("name"), Some("gene"))
.getStopCodon === Block("1", 1997, 2000, Plus))
// mRNA with two blocks
// Strictly contained in one exon
assert(MessengerRNA(chr1_100_200_300_400_plus, 110, 122, Some("name"), Some("gene")).getStopCodon
=== Block("1", 119, 122, Plus))
assert(MessengerRNA(chr1_100_200_300_400_minus, 310, 322, Some("name"), Some("gene")).getStopCodon
=== Block("1", 310, 313, Minus))
// CDS equal to entire mRNA
assert(MessengerRNA(BlockSet(List(Block("1", 100, 199, Plus), Block("1", 300, 399, Plus))),
100, 399, Some("name"), Some("gene")).getStopCodon
=== Block("1", 396, 399, Plus))
// CDS sharing an endpoint with mRNA, contained in two exons
assert(MessengerRNA(chr1_100_200_300_400_plus, 180, 400, Some("name"), Some("gene")).getStopCodon
=== Block("1", 397, 400, Plus))
assert(MessengerRNA(chr1_100_200_300_400_plus, 100, 302, Some("name"), Some("gene")).getStopCodon
=== BlockSet(List(
Block("1", 199, 200, Plus),
Block("1", 300, 302, Plus))))
// CDS sharing an inner endpoint with one exon, contained in that exon
assert(MessengerRNA(chr1_100_200_300_400_plus, 170, 200, Some("name"), Some("gene")).getStopCodon
=== Block("1", 197, 200, Plus))
// mRNA with four blocks
// Strictly contained in one exon
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 1500, 1599, Some("name"), Some("gene")).getStopCodon
=== Block("1", 1500, 1503, Minus))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 3500, 3599, Some("name"), Some("gene")).getStopCodon
=== Block("1", 3596, 3599, Plus))
// Strictly contained in two exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1800, 3001, Some("name"), Some("gene")).getStopCodon
=== BlockSet(List(
Block("1", 1998, 2000, Plus),
Block("1", 3000, 3001, Plus))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3999, 5200, Some("name"), Some("gene")).getStopCodon
=== BlockSet(List(
Block("1", 3999, 4000, Minus),
Block("1", 5000, 5002, Minus))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 5500, 7499, Some("name"), Some("gene")).getStopCodon
=== Block("1", 7496, 7499, Plus))
// CDS equal to three exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1000, 6000, Some("name"), Some("gene")).getStopCodon
=== Block("1", 5997, 6000, Plus))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3000, 8000, Some("name"), Some("gene")).getStopCodon
=== Block("1", 3000, 3003, Minus))
// CDS sharing an endpoint with mRNA, contained in three exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1000, 5001, Some("name"), Some("gene")).getStopCodon
=== BlockSet(List(
Block("1", 3998, 4000, Plus),
Block("1", 5000, 5001, Plus))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3999, 8000, Some("name"), Some("gene")).getStopCodon
=== BlockSet(List(
Block("1", 3999, 4000, Minus),
Block("1", 5000, 5002, Minus))))
// CDS sharing an inner endpoint with one exon, contained in that exon
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 1400, 2000, Some("name"), Some("gene")).getStopCodon
=== Block("1", 1997, 2000, Plus))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3000, 3600, Some("name"), Some("gene")).getStopCodon
=== Block("1", 3000, 3003, Minus))
// CDS sharing an inner endpoint with one exon, contained in two exons
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 3998, 6000, Some("name"), Some("gene")).getStopCodon
=== BlockSet(List(
Block("1", 3998, 4000, Minus),
Block("1", 5000, 5001, Minus))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_plus, 3000, 5002, Some("name"), Some("gene")).getStopCodon
=== BlockSet(List(
Block("1", 3999, 4000, Plus),
Block("1", 5000, 5002, Plus))))
assert(MessengerRNA(chr1_1000_2000_3000_4000_5000_6000_7000_8000_minus, 5000, 7500, Some("name"), Some("gene")).getStopCodon
=== Block("1", 5000, 5003, Minus))
// Stop codon spread over 3 exons
assert(MessengerRNA(BlockSet(
List(Block("1", 1000, 2000, Minus),
Block("1", 3000, 3001, Minus),
Block("1", 4000, 5000, Minus)
)), 1999, 4010, Some("name"), Some("gene")).getStopCodon === BlockSet(List(
Block("1", 1999, 2000, Minus),
Block("1", 3000, 3001, Minus),
Block("1", 4000, 4001, Minus))))
assert(MessengerRNA(BlockSet(
List(Block("1", 1000, 2000, Plus),
Block("1", 3000, 3001, Plus),
Block("1", 4000, 5000, Plus)
)), 1990, 4001, Some("name"), Some("gene")).getStopCodon === BlockSet(List(
Block("1", 1999, 2000, Plus),
Block("1", 3000, 3001, Plus),
Block("1", 4000, 4001, Plus))))
assert(MessengerRNA(BlockSet(
List(Block("1", 1000, 2000, Minus),
Block("1", 3000, 3001, Minus),
Block("1", 4000, 4001, Minus),
Block("1", 5000, 5001, Minus),
Block("1", 6000, 7000, Minus)
)), 3000, 6300, Some("name"), Some("gene")).getStopCodon === BlockSet(List(
Block("1", 3000, 3001, Minus),
Block("1", 4000, 4001, Minus),
Block("1", 5000, 5001, Minus))))
assert(MessengerRNA(BlockSet(
List(Block("1", 1000, 2000, Plus),
Block("1", 3000, 3001, Plus),
Block("1", 4000, 4001, Plus),
Block("1", 5000, 5001, Plus),
Block("1", 6000, 7000, Plus)
)), 1700, 5001, Some("name"), Some("gene")).getStopCodon === BlockSet(List(
Block("1", 3000, 3001, Plus),
Block("1", 4000, 4001, Plus),
Block("1", 5000, 5001, Plus))))
}
}
| pamelarussell/sgxlib | src/test/scala/testfeature/MessengerRNAMiscSuite.scala | Scala | mit | 48,149 |
package org.jetbrains.plugins.scala.testingSupport.scalatest.scala2_10.scalatest1_9_2
import org.jetbrains.plugins.scala.testingSupport.scalatest.ScalaTestPackageTest
/**
* @author Roman.Shein
* @since 05.09.2015.
*/
class Scalatest2_10_1_9_2_PackageTest extends Scalatest2_10_1_9_2_Base with ScalaTestPackageTest {
}
| ilinum/intellij-scala | test/org/jetbrains/plugins/scala/testingSupport/scalatest/scala2_10/scalatest1_9_2/Scalatest2_10_1_9_2_PackageTest.scala | Scala | apache-2.0 | 327 |
import scala.annotation.tailrec
/*
Reference: http://aperiodic.net/phil/scala/s-99
P10 (*) Run-length encoding of a list.
Use the result of problem P09 to implement the so-called run-length encoding data compression method. Consecutive duplicates of elements are encoded as tuples (N, E) where N is the number of duplicates of the element E.
Example:
scala> encode(List('a, 'a, 'a, 'a, 'b, 'c, 'c, 'a, 'a, 'd, 'e, 'e, 'e, 'e))
res0: List[(Int, Symbol)] = List((4,'a), (1,'b), (2,'c), (2,'a), (1,'d), (4,'e))
*/
// From P09
def pack[T](l: List[T]): List[List[T]] = {
@tailrec
def helper(l: List[T], acc: List[List[T]]): List[List[T]] = {
l match {
case Nil => acc.reverse
case h :: t => helper(l.dropWhile(_ == h), l.takeWhile(_ == h) :: acc)
}
}
helper(l, Nil)
}
def encode[T](l: List[T]): List[(Int, T)] = pack(l).map(l => (l.length, l.head))
println(encode(List('a, 'a, 'a, 'a, 'b, 'c, 'c, 'a, 'a, 'd, 'e, 'e, 'e, 'e))) | mcamou/s-99 | src/P10.scala | Scala | apache-2.0 | 955 |
/*
* Copyright 2008 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
*/
package ${package} {
package snippet {
import _root_.java.text.{ParseException,SimpleDateFormat}
import _root_.scala.xml.{NodeSeq,Text}
import _root_.net.liftweb.http.{RequestVar,S,SHtml}
import _root_.net.liftweb.common.{Box,Empty,Full, Loggable}
import _root_.net.liftweb.util.{Helpers}
import S._
import Helpers._
import _root_.javax.persistence.{EntityExistsException,PersistenceException}
import ${package}.model._
import Model._
// Make an object so that other pages (i.e. Authors) can access it
object BookOps {
// Object to hold search results
object resultVar extends RequestVar[List[Book]](Nil)
}
class BookOps extends Loggable {
val formatter = new SimpleDateFormat("yyyyMMdd")
def list (xhtml : NodeSeq) : NodeSeq = {
val books = Model.createNamedQuery[Book]("findAllBooks").getResultList()
books.flatMap(book =>
bind("book", xhtml,
"title" -> Text(book.title),
"published" -> Text(formatter.format(book.published)),
"genre" -> Text(if (book.genre != null) book.genre.toString else ""),
"author" -> Text(book.author.name),
"edit" -> SHtml.link("add.html", () => bookVar(book), Text(?("Edit")))))
}
// Set up a requestVar to track the book object for edits and adds
object bookVar extends RequestVar(new Book())
def book = bookVar.is
// Utility methods for processing a submitted form
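// Building the List evaluates every check eagerly, so each failing check
// calls S.error before forall reduces the results; the user therefore sees
// all validation messages at once rather than just the first.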
def is_valid_Book_? (toCheck : Book) : Boolean =
List((if (toCheck.title.length == 0) { S.error("You must provide a title"); false } else true),
(if (toCheck.published == null) { S.error("You must provide a publish date"); false } else true),
(if (toCheck.genre == null) { S.error("You must select a genre"); false } else true),
(if (toCheck.author == null) { S.error("You must select an author"); false } else true)).forall(_ == true)
def setDate (input : String, toSet : Book) {
try {
toSet.published = formatter.parse(input)
} catch {
case pe : ParseException => S.error("Error parsing the date")
}
}
// The add snippet method
def add (xhtml : NodeSeq) : NodeSeq = {
def doAdd () =
if (is_valid_Book_?(book)) {
try {
Model.mergeAndFlush(book)
redirectTo("list.html")
} catch {
case ee : EntityExistsException => error("That book already exists.")
case pe : PersistenceException => error("Error adding book"); logger.error("Book add failed", pe)
}
}
// Hold a val here so that the closure holds it when we re-enter this method
val current = book
val authors = Model.createNamedQuery[Author]("findAllAuthors").getResultList()
val choices = authors.map(author => (author.id.toString -> author.name)).toList
val default = if (book.author != null) { Full(book.author.id.toString) } else { Empty }
bind("book", xhtml,
"id" -> SHtml.hidden(() => bookVar(current)),
"title" -> SHtml.text(book.title, book.title = _),
"published" -> SHtml.text(formatter.format(book.published), setDate(_, book)) % ("id" -> "published"),
"genre" -> SHtml.select(Genre.getNameDescriptionList, (Box.legacyNullTest(book.genre).map(_.toString) or Full("")), choice => book.genre = Genre.valueOf(choice).getOrElse(null)),
"author" -> SHtml.select(choices, default, {authId : String => book.author = Model.getReference(classOf[Author], authId.toLong)}),
"save" -> SHtml.submit(?("Save"), doAdd))
}
def searchResults (xhtml : NodeSeq) : NodeSeq = BookOps.resultVar.is.flatMap(result =>
bind("result", xhtml, "title" -> Text(result.title), "author" -> Text(result.author.name)))
def search (xhtml : NodeSeq) : NodeSeq = {
var title = ""
def doSearch () = {
BookOps.resultVar(Model.createNamedQuery[Book]("findBooksByTitle", "title" -> ("%" + title.toLowerCase + "%")).getResultList().toList)
}
bind("search", xhtml,
"title" -> SHtml.text(title, x => title = x),
"run" -> SHtml.submit(?("Search"), doSearch _))
}
}
}
}
| wsaccaco/lift | archetypes/lift-archetype-jpa-basic/src/main/resources/archetype-resources/web/src/main/scala/snippet/Books.scala | Scala | apache-2.0 | 4,527 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.random
import java.nio.ByteBuffer
import java.util.{Random => JavaRandom}
import scala.util.hashing.MurmurHash3
import org.apache.spark.util.Utils.timeIt
/**
* This class implements a XORShift random number generator algorithm
* Source:
* Marsaglia, G. (2003). Xorshift RNGs. Journal of Statistical Software, Vol. 8, Issue 14.
* @see <a href="http://www.jstatsoft.org/v08/i14/paper">Paper</a>
* This implementation is approximately 3.5 times faster than
* {@link java.util.Random java.util.Random}, partly because of the algorithm, but also due
* to renouncing thread safety. JDK's implementation uses an AtomicLong seed, this class
* uses a regular Long. We can forgo thread safety since we use a new instance of the RNG
* for each thread.
*/
private[spark] class XORShiftRandom(init: Long) extends JavaRandom(init) {
def this() = this(System.nanoTime)
private var seed = XORShiftRandom.hashSeed(init)
// we need to just override next - this will be called by nextInt, nextDouble,
// nextGaussian, nextLong, etc.
override protected def next(bits: Int): Int = {
var nextSeed = seed ^ (seed << 21)
nextSeed ^= (nextSeed >>> 35)
nextSeed ^= (nextSeed << 4)
seed = nextSeed
(nextSeed & ((1L << bits) -1)).asInstanceOf[Int]
}
override def setSeed(s: Long) {
seed = XORShiftRandom.hashSeed(s)
}
}
/** Contains benchmark method and main method to run benchmark of the RNG */
private[spark] object XORShiftRandom {
/** Hash seeds to have 0/1 bits throughout. */
private def hashSeed(seed: Long): Long = {
val bytes = ByteBuffer.allocate(java.lang.Long.SIZE).putLong(seed).array()
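// Note: java.lang.Long.SIZE is 64 (bits, not bytes), so this allocates a
// 64-byte buffer; the long occupies the first 8 bytes and the remaining
// zero padding is hashed along with it.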
MurmurHash3.bytesHash(bytes)
}
/**
* Main method for running benchmark
* @param args takes one argument - the number of random numbers to generate
*/
def main(args: Array[String]): Unit = {
if (args.length != 1) {
println("Benchmark of XORShiftRandom vis-a-vis java.util.Random")
println("Usage: XORShiftRandom number_of_random_numbers_to_generate")
System.exit(1)
}
println(benchmark(args(0).toInt))
}
/**
* @param numIters Number of random numbers to generate while running the benchmark
* @return Map of execution times for {@link java.util.Random java.util.Random}
* and XORShift
*/
def benchmark(numIters: Int): Map[String, Long] = {
val seed = 1L
val million = 1e6.toInt
val javaRand = new JavaRandom(seed)
val xorRand = new XORShiftRandom(seed)
// this is just to warm up the JIT - we're not timing anything
timeIt(million) {
javaRand.nextInt()
xorRand.nextInt()
}
/* Return results as a map instead of just printing to screen
in case the user wants to do something with them */
Map("javaTime" -> timeIt(numIters) { javaRand.nextInt() },
"xorTime" -> timeIt(numIters) { xorRand.nextInt() })
}
}
| andrewor14/iolap | core/src/main/scala/org/apache/spark/util/random/XORShiftRandom.scala | Scala | apache-2.0 | 3,709 |
package org.rbudzko.streams
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Sink, Source}
object E3_InnerFlow extends App {
/**
* Prepare materializer - this guy can create streams.
* Materializer uses actor system.
*/
implicit lazy val system = ActorSystem()
implicit lazy val materializer = ActorMaterializer()
/**
* Single element 777 modified by other flow
* blueprint and printed.
*/
val inner = Flow[Int]
.map(number => number - 77)
.map(number => number + 300)
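/**
 * `inner` is only a blueprint; nothing runs until the graph below
 * is materialized by run(). Net effect per element: +223.
 */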
Source.single(777)
.via(inner)
.to(Sink.foreach(number => println(number)))
.run()
/**
* Cleanup...
*/
Thread.sleep(5000)
materializer.shutdown()
system.terminate()
}
| rbudzko/workshops-akka | quick-streams/src/main/scala/org/rbudzko/streams/E3_InnerFlow.scala | Scala | apache-2.0 | 771 |
package scredis.protocol
import akka.util.ByteString
import scredis.exceptions._
import scredis.serialization.Reader
import scala.util.{ Try, Success, Failure }
import scala.collection.generic.CanBuildFrom
import java.nio.ByteBuffer
trait Response
case class ErrorResponse(value: String) extends Response
case class SimpleStringResponse(value: String) extends Response
case class IntegerResponse(value: Long) extends Response {
def toBoolean: Boolean = value > 0
}
case class BulkStringResponse(valueOpt: Option[Array[Byte]]) extends Response {
def parsed[R](implicit reader: Reader[R]): Option[R] = valueOpt.map(reader.read)
def flattened[R](implicit reader: Reader[R]): R = parsed[R].get
override def toString = s"BulkStringResponse(" +
s"${valueOpt.map(ByteString(_).decodeString("UTF-8"))})"
}
case class ArrayResponse(length: Int, buffer: ByteBuffer) extends Response {
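// Peeks at the first element of the array reply without consuming it: the
// buffer position is saved before decoding and restored afterwards, so a
// subsequent parsed(...) call still sees every element.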
def headOpt[R](decoder: Decoder[R]): Option[R] = if (length > 0) {
val position = buffer.position
val response = Protocol.decode(buffer)
if (decoder.isDefinedAt(response)) {
val decoded = decoder.apply(response)
buffer.position(position)
Some(decoded)
} else {
throw new IllegalArgumentException(s"Does not know how to parse response: $response")
}
} else {
None
}
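// Decodes all `length` responses in order, applying the partial decoder to
// each one and materializing whatever collection type the implicit
// CanBuildFrom targets.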
def parsed[R, CC[X] <: Traversable[X]](decoder: Decoder[R])(
implicit cbf: CanBuildFrom[Nothing, R, CC[R]]
): CC[R] = {
val builder = cbf()
var i = 0
while (i < length) {
val response = Protocol.decode(buffer)
if (decoder.isDefinedAt(response)) {
builder += decoder.apply(response)
} else {
throw new IllegalArgumentException(s"Does not know how to parse response: $response")
}
i += 1
}
builder.result()
}
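  /**
   * Decodes the array as consecutive (first, second) pairs, applying the two
   * decoders alternately.
   */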
def parsedAsPairs[R1, R2, CC[X] <: Traversable[X]](
firstDecoder: Decoder[R1]
)(
secondDecoder: Decoder[R2]
)(implicit cbf: CanBuildFrom[Nothing, (R1, R2), CC[(R1, R2)]]): CC[(R1, R2)] = {
val builder = cbf()
var i = 0
while (i < length) {
val firstResponse = Protocol.decode(buffer)
val firstValue = if (firstDecoder.isDefinedAt(firstResponse)) {
firstDecoder.apply(firstResponse)
} else {
throw new IllegalArgumentException(
s"Does not know how to parse first response: $firstResponse"
)
}
val secondResponse = Protocol.decode(buffer)
val secondValue = if (secondDecoder.isDefinedAt(secondResponse)) {
secondDecoder.apply(secondResponse)
} else {
throw new IllegalArgumentException(
s"Does not know how to parse second response: $secondResponse"
)
}
builder += ((firstValue, secondValue))
i += 2
}
builder.result()
}
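  /**
   * Same as `parsedAsPairs`, but collects the decoded pairs into a map.
   */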
def parsedAsPairsMap[R1, R2, CC[X, Y] <: collection.Map[X, Y]](
firstDecoder: Decoder[R1]
)(
secondDecoder: Decoder[R2]
)(implicit cbf: CanBuildFrom[Nothing, (R1, R2), CC[R1, R2]]): CC[R1, R2] = {
val builder = cbf()
var i = 0
while (i < length) {
val firstResponse = Protocol.decode(buffer)
val firstValue = if (firstDecoder.isDefinedAt(firstResponse)) {
firstDecoder.apply(firstResponse)
} else {
throw new IllegalArgumentException(
s"Does not know how to parse first response: $firstResponse"
)
}
val secondResponse = Protocol.decode(buffer)
val secondValue = if (secondDecoder.isDefinedAt(secondResponse)) {
secondDecoder.apply(secondResponse)
} else {
throw new IllegalArgumentException(
s"Does not know how to parse second response: $secondResponse"
)
}
builder += ((firstValue, secondValue))
i += 2
}
builder.result()
}
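  /**
   * Decodes a SCAN-style reply: a two-element array containing the next cursor
   * and the array of returned elements.
   */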
def parsedAsScanResponse[R, CC[X] <: Traversable[X]](
decoder: Decoder[CC[R]]
): (Long, CC[R]) = {
if (length != 2) {
throw RedisProtocolException(s"Unexpected length for scan-like array response: $length")
}
val nextCursor = Protocol.decode(buffer) match {
case b: BulkStringResponse => b.flattened[String].toLong
case x => throw RedisProtocolException(s"Unexpected response for scan cursor: $x")
}
Protocol.decode(buffer) match {
case a: ArrayResponse if decoder.isDefinedAt(a) => (nextCursor, decoder.apply(a))
case a: ArrayResponse => throw new IllegalArgumentException(
s"Does not know how to parse response: $a"
)
case x => throw RedisProtocolException(s"Unexpected response for scan elements: $x")
}
}
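  /**
   * Decodes each element with its own decoder, wrapping per-element failures
   * (error replies, protocol errors) in a `Failure` instead of aborting.
   */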
def parsed[CC[X] <: Traversable[X]](decoders: Traversable[Decoder[Any]])(
implicit cbf: CanBuildFrom[Nothing, Try[Any], CC[Try[Any]]]
): CC[Try[Any]] = {
val builder = cbf()
var i = 0
val decodersIterator = decoders.toIterator
while (i < length) {
val response = Protocol.decode(buffer)
val decoder = decodersIterator.next()
val result = response match {
case ErrorResponse(message) => Failure(RedisErrorResponseException(message))
case reply => if (decoder.isDefinedAt(reply)) {
try {
Success(decoder.apply(reply))
} catch {
case e: Throwable => Failure(RedisProtocolException("", e))
}
} else {
Failure(RedisProtocolException(s"Unexpected reply: $reply"))
}
}
builder += result
i += 1
}
builder.result()
}
override def toString = s"ArrayResponse(length=$length, buffer=" +
s"${ByteString(buffer).decodeString("UTF-8")})"
} | Livestream/scredis | src/main/scala/scredis/protocol/Response.scala | Scala | apache-2.0 | 5,642 |
package repository
import java.time.ZonedDateTime
import adapter.BitFlyer.MyExecution
import models.TradingRecord
import repository.model.scalatrader.TradingRecord2
import scalikejdbc.{AutoSession, _}
object RecordRepository {
val tr = TradingRecord.syntax("tr")
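  /** Persists a new entry execution for the given user and strategy state. */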
def insert(email: String, strategyStateId: Long, child_order_acceptance_id: String, entry: Seq[MyExecution], time: ZonedDateTime): Unit = {
implicit val session = AutoSession
import io.circe.syntax._
import io.circe.generic.auto._
val json = entry.asJson.toString()
sql"insert into trading_record (email, trading_rule_state_id, entry_id, entry_execution, entry_timestamp) values ($email, $strategyStateId, $child_order_acceptance_id, $json, $time)".update
.apply()
}
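  /** Records the closing execution for a previously inserted entry. */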
def update(email: String, child_order_acceptance_id: String, entry_id: String, close: Seq[MyExecution], time: ZonedDateTime): Int = {
implicit val session = AutoSession
import io.circe.syntax._
import io.circe.generic.auto._
val json = close.asJson.toString()
sql"update trading_record set close_id = $child_order_acceptance_id, close_execution = $json, close_timestamp = $time where email = $email and entry_id = $entry_id".update
.apply()
}
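  /** Loads all trading records for the user and strategy state with an entry after `from`. */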
def findAll(email: String, strategyStateId: Long, from: ZonedDateTime): List[TradingRecord2] = {
implicit val session = AutoSession
val query = sql"select * from trading_record where email = ${email} and trading_rule_state_id = ${strategyStateId} and entry_timestamp > ${from}"
query
.map((rs: WrappedResultSet) => {
val jsonToExecutions = (json: String) => {
import io.circe.parser._
import io.circe.generic.auto._
          decode[Seq[MyExecution]](json).getOrElse(Seq.empty[MyExecution])
}
TradingRecord2(
rs.long("id"),
rs.string("email"),
rs.long("trading_rule_state_id"),
rs.string("entry_id"),
jsonToExecutions(rs.string("entry_execution")),
rs.zonedDateTime("entry_timestamp"),
Option(rs.string("close_id")),
jsonToExecutions(rs.string("close_execution")),
Option(rs.zonedDateTime("close_timestamp")),
rs.zonedDateTime("timestamp")
)
})
.list
.apply()
}
}
| rysh/scalatrader | scalatrader/app/repository/RecordRepository.scala | Scala | mit | 2,324 |
package msort
import leon._
import lang._
import annotation._
import instrumentation._
import invariant._
import mem._
import higherorder._
import stats._
/**
* Computing the kth min using a lazy merge sort algorithm that operates bottom-up.
* This allows accessing the minimum element in O(n) time, where n is the number of
* elements in the input list.
* It allows accessing all elements until the kth minimum in O(k.log(n)+n) time:
* see function `kthMin`
* Note that accessing the last element takes O(n.log(n)) which is the complexity of
* sorting all elements using merge sort.
*/
object BottomUpMergeSort {
@inline
  def max(x: BigInt, y: BigInt) = if (x >= y) x else y
sealed abstract class List[T] {
// size is used in the specs
def size: BigInt = (this match {
case Nil() => BigInt(0)
case Cons(h, t) => 1 + t.size
}) ensuring (_ >= 0)
// length is used in the implementation
val length: BigInt = (this match {
case Nil() => BigInt(0)
case Cons(h, t) => 1 + t.length
}) ensuring (_ == size)
}
case class Cons[T](x: T, tail: List[T]) extends List[T]
case class Nil[T]() extends List[T]
private sealed abstract class LList {
def size: BigInt = {
this match {
case SCons(_, t) => 1 + t.size
case _ => BigInt(0)
}
} ensuring (_ >= 0)
def height: BigInt = {
this match {
case SCons(_, t) => t.height
case _ => BigInt(0)
}
} ensuring(_ >= 0)
def weightBalanced: Boolean = {
this match {
case SCons(_, t) => t.weightBalanced
case _ => true
}
}
}
private case class SCons(x: BigInt, tailFun: Stream) extends LList
private case class SNil() extends LList
private case class Stream(lfun: () => LList) {
@inline
def size = (list*).size
lazy val list: LList = lfun()
def height: BigInt = {
(lfun fmatch[LList, Stream, BigInt] {
case (a, b) if lfun.is(() => mergeSusp(a, b)) => 1 + max(a.height, b.height)
case _ => BigInt(0)
}): BigInt
    } ensuring(_ >= 0)
@invisibleBody
def weightBalanced: Boolean = {
lfun fmatch[LList, Stream, Boolean] {
case (a, b) if lfun.is(() => mergeSusp(a, b)) =>
val asz = a.size
val bsz = b.size
asz > 0 && asz >= bsz && (asz - bsz) <= 2 &&
a.weightBalanced && b.weightBalanced
case _ => true
}
}
}
@inline
private val nilStream: Stream = Stream(() => SNil())
/**
* A function that computes 3 + log_2(x) for x >= 1
* The function is defined as 1 for negative values, and 2 for zero.
*/
  def log(x: BigInt): BigInt = {
if(x < 0) BigInt(1)
else if(x == 0) BigInt(2)
else
1 + log(x/2)
} ensuring(_ >= 1)
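  // e.g. log(1) == 3, log(2) == 4, log(4) == 5, i.e. 3 + floor(log2(x)) for x >= 1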
@invisibleBody
def logMonotonicity(x: BigInt, y: BigInt): Boolean = {
require(x <= y)
(if(x <= 0) true
else logMonotonicity(x / 2, y / 2)) && log(x) <= log(y)
} holds
@inline
def recSizePost(l: Stream, res: BigInt): Boolean = {
l.lfun fmatch[LList, Stream, Boolean] {
case (a, b) if l.lfun.is(() => mergeSusp(a, b)) =>
        val asz = recSizeL(a) - 2
val bsz = recSize(b) - 1
logMonotonicity(2 * asz, res - 1) &&
logMonotonicity(2 * bsz, res - 1)
case _ => true
}
}
// the following facts necessary for proving the logarithmic bounds are automatically inferred, but are stated here for the record
/*2 * asz <= res - 1 &&
2 * bsz <= res - 1 &&*/
/*(if(asz >= 1) {
log(asz) + 1 <= log(res - 1)
} else
a.height <= log(res - 1) - 1) &&
(if(bsz >= 1) {
log(bsz) + 1 <= log(res - 1)
} else
b.height <= log(res - 1) - 1)*/
@inline
def recSizeL(l: LList): BigInt = {
l match {
case SCons(_, t) => 1 + recSize(t)
}
}
@invisibleBody
def recSize(l: Stream): BigInt = {
require(l.weightBalanced)
(l.lfun fmatch[LList, Stream, BigInt] {
case (a, b) if l.lfun.is(() => mergeSusp(a, b)) => recSizeL(a) + recSize(b)
case _ => BigInt(0)
}) : BigInt
} ensuring (res => l.size == res && recSizePost(l, res) && l.height <= log(res - 1))
@invisibleBody
def logHeightProperty(l: LList): Boolean = {
require(l.weightBalanced)
val lsz = l.size
(l match {
case SNil() => true
case SCons(_, t) =>
recSize(t) == t.size
}) &&
logMonotonicity(lsz - 2, lsz - 1) &&
l.height <= log(lsz - 1)
} holds
/**
   * A functional implementation of build-heap: constructs a balanced
   * merge tree over the input list in linear time.
*/
@invisibleBody
private def constructMergeTree(l: List[BigInt], from: BigInt, to: BigInt): (LList, List[BigInt]) = {
require(from <= to && from >= 0 && (to - from + 1) <= l.size )
l match {
case Nil() => (SNil(), Nil[BigInt]()) // this case is unreachable
case Cons(x, tail) =>
if(from == to) (SCons(x, nilStream), tail)
else {
val mid = (from + to) / 2
val (lside, midlist) = constructMergeTree(l, from, mid)
val (rside, rest) = constructMergeTree(midlist, mid + 1, to)
(merge(lside, rside), rest)
}
}
} ensuring{ res =>
val range = to - from + 1
val (reslist, rest) = res
reslist.size == range &&
rest.size == l.size - range &&
reslist.weightBalanced &&
alloc <= ? * range + ? // 56 * to - 56 * from + 12
}
@invisibleBody
private def merge(a: LList, b: LList): LList = {
b match {
case SNil() => a
case SCons(x, xs) =>
a match {
case SNil() => b
case SCons(y, ys) =>
if (y < x)
            SCons(y, Stream(() => mergeSusp(b, ys))) // note: the argument order is swapped here, so the sort is not stable
else
SCons(x, Stream(() => mergeSusp(a, xs)))
}
}
} ensuring{res => a.size + b.size == res.size &&
alloc <= ?
}
/**
* A function that merges two sorted streams of integers.
   * Note: the sorted stream of integers may be recursively constructed using merge.
* Takes time linear in the size of the streams (non-trivial to prove due to cascading of lazy calls)
*/
@invisibleBody
private def mergeSusp(a: LList, b: Stream): LList = {
require(a != SNil()) // && a.valid && b.valid)
merge(a, b.list)
} ensuring {res =>
res != SNil() &&
res.height <= max(a.height, b.height) + 1 &&
alloc <= ? * b.height + ?
}
/**
* Takes list of integers and returns a sorted stream of integers.
* Takes time linear in the size of the input since it sorts lazily.
*/
@invisibleBody
private def mergeSort(l: List[BigInt]): LList = {
l match {
case Nil() => SNil()
case _ => constructMergeTree(l, 0, l.length - 1)._1
}
} ensuring (res => res.weightBalanced &&
logHeightProperty(res) &&
l.size == res.size &&
res.height <= log(l.size - 1) &&
alloc <= ? * l.size + ?) // 56 * l.size + 3
private def kthMinRec(l: LList, k: BigInt): BigInt = {
require(k >= 0)
l match {
case SCons(x, xs) =>
if (k == 0) x
else
kthMinRec(xs.list, k - 1)
case SNil() => BigInt(0)
}
} ensuring (_ => alloc <= ? * (k * l.height) + ?)
/**
* A function that accesses the kth element of a list using lazy sorting.
*/
def kthMin(l: List[BigInt], k: BigInt): BigInt = {
require(k >= 0)
kthMinRec(mergeSort(l), k)
} ensuring(_ => alloc <= ? * (k * log(l.size - 1)) + ? * (l.size) + ?)
// @ignore
// def main(args: Array[String]) {
// import scala.util.Random
// import scala.math.BigInt
// import stats._
// println("Running merge sort test...")
// val length = 3000000
// val maxIndexValue = 100
// val randomList = Random.shuffle((0 until length).toList)
// val l1 = randomList.foldRight(List[BigInt]()){
// case (i, acc) => BigInt(i) :: acc
// }
// val l2 = randomList.foldRight(Nil[BigInt](): List[BigInt]){
// case (i, acc) => Cons(BigInt(i), acc)
// }
// println(s"Created inputs of size (${l1.size},${l2.size}), starting operations...")
// val sort1 = timed{ mergeSort(l1) }{t => println(s"Lazy merge sort l1 completed in ${t/1000.0} sec") }
// val sort2 = timed{ mergeSort(l2) }{t => println(s"Lazy merge sort l2 completed in ${t/1000.0} sec") }
// val e1 = timed { kthMin(l1, maxIndexValue) } {t => println(s"Time taken for $maxIndexValue-th min: ${t/1000.0} sec") }
// val e2 = timed { kthMin(l2, maxIndexValue) } {t => println(s"Time taken for $maxIndexValue-th min: ${t/1000.0} sec") }
// }
}
| regb/leon | testcases/benchmarks/alloc/BottomUpMergeSort.scala | Scala | gpl-3.0 | 8,659 |
/**
 * Empty collection of messages
*/
class TendenciaVacia extends Tendencia {
/**
   * Adds a new message
   *
   * @param mensaje the tweet to add
   * @return a non-empty trend containing the message
*/
def +(mensaje: Tweet) = new TendenciaNoVacia(mensaje, new TendenciaVacia)
/**
   * Returns the first message; as there is none, an exception is thrown
*
* @return
*/
def head: Tweet = throw new Exception("TendenciaVacia.head")
/**
   * Returns the rest of the messages: an exception is thrown
*
* @return
*/
def tail: Tendencia = throw new Exception("TendenciaVacia.tail")
/**
   * Indicates whether the set is empty; true by definition
*
* @return
*/
def isEmpty: Boolean = true
/**
   * Length of the trend
*
* @return
*/
def length: Integer = 0
/**
   * toString method
*
* @return
*/
override def toString = "TendenciaVacia"
}
| fblupi/grado_informatica-NTP | Practicas/P4/src/TendenciaVacia.scala | Scala | gpl-2.0 | 872 |
package com.twitter.finagle.service
import com.twitter.finagle.Filter.TypeAgnostic
import com.twitter.finagle._
import com.twitter.finagle.context.{Deadline, Contexts}
import com.twitter.finagle.client.LatencyCompensation
import com.twitter.finagle.stats.{NullStatsReceiver, StatsReceiver}
import com.twitter.finagle.tracing.Trace
import com.twitter.util.{Future, Duration, Timer}
object TimeoutFilter {
val TimeoutAnnotation: String = "finagle.timeout"
val role: Stack.Role = new Stack.Role("RequestTimeout")
/**
* A class eligible for configuring a [[com.twitter.finagle.Stackable]]
* [[com.twitter.finagle.service.TimeoutFilter]] module.
*/
case class Param(timeout: Duration) {
def mk(): (Param, Stack.Param[Param]) =
(this, Param.param)
}
object Param {
implicit val param: Stack.Param[TimeoutFilter.Param] =
Stack.Param(Param(Duration.Top))
}
/**
* Creates a [[com.twitter.finagle.Stackable]] [[com.twitter.finagle.service.TimeoutFilter]]
* for use in clients.
*/
def clientModule[Req, Rep]: Stackable[ServiceFactory[Req, Rep]] =
new Stack.Module4[
TimeoutFilter.Param,
param.Timer,
LatencyCompensation.Compensation,
param.Stats,
ServiceFactory[Req, Rep]] {
val role: Stack.Role = TimeoutFilter.role
val description: String =
"Apply a timeout-derived deadline to requests; adjust existing deadlines."
def make(
_param: Param,
_timer: param.Timer,
_compensation: LatencyCompensation.Compensation,
_stats: param.Stats,
next: ServiceFactory[Req, Rep]
): ServiceFactory[Req, Rep] = {
val timeout = _param.timeout + _compensation.howlong
if (!timeout.isFinite || timeout <= Duration.Zero) {
next
} else {
val param.Timer(timer) = _timer
val exc = new IndividualRequestTimeoutException(timeout)
val param.Stats(stats) = _stats
val filter = new TimeoutFilter[Req, Rep](
timeout, exc, timer, stats.scope("timeout"))
filter.andThen(next)
}
}
}
/**
* Creates a [[com.twitter.finagle.Stackable]] [[com.twitter.finagle.service.TimeoutFilter]]
* for use in servers.
*/
def serverModule[Req, Rep]: Stackable[ServiceFactory[Req, Rep]] =
new Stack.Module3[
TimeoutFilter.Param,
param.Timer,
param.Stats,
ServiceFactory[Req, Rep]] {
val role: Stack.Role = TimeoutFilter.role
val description: String =
"Apply a timeout-derived deadline to requests; adjust existing deadlines."
def make(
_param: Param,
_timer: param.Timer,
_stats: param.Stats,
next: ServiceFactory[Req, Rep]
): ServiceFactory[Req, Rep] = {
val Param(timeout) = _param
val param.Timer(timer) = _timer
val param.Stats(stats) = _stats
if (!timeout.isFinite || timeout <= Duration.Zero) next else {
val exc = new IndividualRequestTimeoutException(timeout)
val filter = new TimeoutFilter[Req, Rep](
timeout, exc, timer, stats.scope("timeout"))
filter.andThen(next)
}
}
}
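  /**
   * Creates a [[com.twitter.finagle.Filter.TypeAgnostic]] timeout filter that
   * can be applied to requests and responses of any type.
   */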
def typeAgnostic(
timeout: Duration,
exception: RequestTimeoutException,
timer: Timer
): TypeAgnostic = new TypeAgnostic {
override def toFilter[Req, Rep]: Filter[Req, Rep, Req, Rep] =
new TimeoutFilter[Req, Rep](timeout, exception, timer)
}
}
/**
 * A [[com.twitter.finagle.Filter]] that applies a timeout to requests.
*
* If the response is not satisfied within the `timeout`,
* the [[Future]] will be interrupted via [[Future.raise]].
*
* @param timeout the timeout to apply to requests
* @param exception an exception object to return in cases of timeout exceedance
* @param timer a `Timer` object used to track elapsed time
*
* @see The sections on
* [[https://twitter.github.io/finagle/guide/Clients.html#timeouts-expiration clients]]
* and [[https://twitter.github.io/finagle/guide/Servers.html#request-timeout servers]]
* in the user guide for more details.
*/
class TimeoutFilter[Req, Rep](
timeout: Duration,
exception: RequestTimeoutException,
timer: Timer,
statsReceiver: StatsReceiver)
extends SimpleFilter[Req, Rep] {
def this(timeout: Duration, exception: RequestTimeoutException, timer: Timer) =
this(timeout, exception, timer, NullStatsReceiver)
def this(timeout: Duration, timer: Timer) =
this(timeout, new IndividualRequestTimeoutException(timeout), timer)
private[this] val expiredDeadlineStat = statsReceiver.stat("expired_deadline_ms")
def apply(request: Req, service: Service[Req, Rep]): Future[Rep] = {
val timeoutDeadline = Deadline.ofTimeout(timeout)
// If there's a current deadline, we combine it with the one derived
// from our timeout.
val deadline = Deadline.current match {
case Some(current) => Deadline.combined(timeoutDeadline, current)
case None => timeoutDeadline
}
if (deadline.expired) {
expiredDeadlineStat.add(-deadline.remaining.inMillis)
}
Contexts.broadcast.let(Deadline, deadline) {
val res = service(request)
res.within(timer, timeout).rescue {
case exc: java.util.concurrent.TimeoutException =>
res.raise(exc)
Trace.record(TimeoutFilter.TimeoutAnnotation)
Future.exception(exception)
}
}
}
} | adriancole/finagle | finagle-core/src/main/scala/com/twitter/finagle/service/TimeoutFilter.scala | Scala | apache-2.0 | 5,462 |
package com.wlangiewicz.xbot
import java.net.SocketException
import com.codahale.metrics.MetricRegistry
import com.wlangiewicz.xbot.domain.{MyOrder, Order}
import com.wlangiewicz.xbot.exceptions._
import com.wlangiewicz.xbot.providers.ExchangeProvider
import com.wlangiewicz.xbot.util.{BotUtil, SimpleSpreadOfferRateEstimator}
import org.apache.http.NoHttpResponseException
import org.slf4j.LoggerFactory
import rapture.data.{MissingValueException, TypeMismatchException}
class Bot(exchange: ExchangeProvider, estimator: SimpleSpreadOfferRateEstimator, metrics: MetricRegistry) {
val log = LoggerFactory.getLogger(this.getClass)
val loopCount = metrics.meter("loopCount")
val askCount = metrics.meter("askCount")
val bidCount = metrics.meter("bidCount")
val cancelAskCount = metrics.meter("cancelAskCount")
val cancelBidCount = metrics.meter("cancelBidCount")
val ExchangeExceptionCount = metrics.meter("ExchangeExceptionCount")
val IndexOutOfBoundsExceptionCount = metrics.meter("IndexOutOfBoundsExceptionCount")
val NoSuchElementExceptionCount = metrics.meter("NoSuchElementExceptionCount")
val TypeMismatchExceptionCount = metrics.meter("TypeMismatchExceptionCount")
val MissingValueExceptionCount = metrics.meter("MissingValueExceptionCount")
val SocketExceptionCount = metrics.meter("SocketExceptionCount")
val OtherExceptionCount = metrics.meter("OtherExceptionCount")
protected def bids = exchange.getOrderBookBids.filter(_.units > exchange.minOrderToEvaluate)
protected def asks = exchange.getOrderBookAsks.filter(_.units > exchange.minOrderToEvaluate)
protected def myOrders = exchange.getMyOrders
protected def trades = exchange.getTrades
protected def ticker = exchange.getTicker
protected def myBidsSorted = myOrders.filter(_.`type` == "bid").sortBy(_.rate)(Ordering[Long].reverse)
protected def myAsksSorted = myOrders.filter(_.`type` == "ask").sortBy(_.rate)
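  /** The highest bid in the order book that is not one of our own orders. */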
def highestNotMyBid(bids: List[Order], myBids: List[MyOrder]): Order = {
getTopOrder(bids, myBids, Ordering[Long].reverse)
}
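  /** The lowest ask in the order book that is not one of our own orders. */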
def lowestNotMyAsk(asks: List[Order], myAsks: List[MyOrder]) = {
getTopOrder(asks, myAsks, Ordering[Long])
}
  protected def getTopOrder(orders: List[Order], myOrders: List[MyOrder], ordering: Ordering[Long]): Order = {
val orderRates = orders.map(_.rate)
val myOrderRates = myOrders.map(_.rate)
val diff = orderRates.diff(myOrderRates).sorted(ordering)
orders.find(_.rate == diff.head) match {
case Some(b) => b
case _ => throw new ExchangeException("order not found not found on list myOrders")
}
}
protected def myAsksWithDifferentRate(rate: Long) = {
myAsksSorted.filter(a => Math.abs(a.rate - rate) >= exchange.PLNStepResolution) //the difference must be larger than smallest step
}
protected def myBidsWithDifferentRate(rate: Long) = {
myBidsSorted.filter(b => Math.abs(b.rate - rate) >= exchange.PLNStepResolution)
}
protected def orderWithRate(`type`: String, amount: Long, rate: Long) = {
val orderOutput = exchange.placeOrder(`type`, "LTC", amount, "PLN", rate)
log.info(`type`.toUpperCase + "ing result: " + orderOutput)
}
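  /**
   * Runs the trading loop forever: invalidates caches, executes one iteration,
   * recovers from transient errors and sleeps between iterations.
   */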
def runInLoopWithSleep(sleep: Long, operation: String): Unit = {
    def processException(e: Throwable) = {
log.error(e.toString)
e.printStackTrace()
exchange.invalidateAllCaches()
Thread.sleep(10000)
}
while (true) {
exchange.invalidateAllCaches()
try {
loop(operation)
}
catch {
case error: MissingValueException =>
MissingValueExceptionCount.mark()
processException(error)
case error: TypeMismatchException =>
TypeMismatchExceptionCount.mark()
processException(error)
case error: IndexOutOfBoundsException =>
IndexOutOfBoundsExceptionCount.mark()
processException(error)
case error: NoSuchElementException =>
NoSuchElementExceptionCount.mark()
processException(error)
case error: ExchangeException =>
ExchangeExceptionCount.mark()
processException(error)
case error: SocketException =>
SocketExceptionCount.mark()
processException(error)
case error: NoHttpResponseException =>
processException(error)
case be: BreakingException =>
log.error(be.toString)
be.printStackTrace()
System.exit(1337)
        case error: Throwable =>
          OtherExceptionCount.mark()
          processException(error)
}
log.info("Sleeping for: " + sleep + "ms")
Thread.sleep(sleep)
}
}
private def processAsks() = {
val newAskRate = estimator.estimateAskRate(highestNotMyBid(bids, myBidsSorted).rate, lowestNotMyAsk(asks, myAsksSorted).rate)
val asksToCancel = myAsksWithDifferentRate(newAskRate)
val countCanceledAsks = asksToCancel.length
if (countCanceledAsks > 0) {
cancelAskCount.mark()
val cancelAsksOutput = exchange.cancelOrders(asksToCancel.map(_.order_id))
log.info("canceling all " + "ask".toUpperCase + "s: " + cancelAsksOutput)
}
val amountToAsk = BotUtil.simpleAmountRound(exchange.currencyAvailable).toLong
if (amountToAsk > exchange.minAskAmount) {
askCount.mark()
orderWithRate("ask", amountToAsk, newAskRate)
}
}
private def processBids() = {
val newBidRate = estimator.estimateBidRate(highestNotMyBid(bids, myBidsSorted).rate, lowestNotMyAsk(asks, myAsksSorted).rate)
val bidsToCancel = myBidsWithDifferentRate(newBidRate)
val countCanceledBids = bidsToCancel.length
if (countCanceledBids > 0) {
cancelBidCount.mark()
val cancelBidsOutput = exchange.cancelOrders(bidsToCancel.map(_.order_id))
log.info("canceling all " + "bid".toUpperCase + "s: " + cancelBidsOutput)
}
val amountToBid = BotUtil.simpleAmountRound(exchange.paymentCurrencyAvailable * BotUtil.BASE / newBidRate).toLong
if (amountToBid > exchange.minBidAmount) {
bidCount.mark()
orderWithRate("bid", amountToBid, newBidRate)
}
}
def loop(operation: String) = {
loopCount.mark()
log.info("myBidsSorted: " + myBidsSorted)
log.info("myAsksSorted: " + myAsksSorted)
log.info("highestNotMyBid: " + highestNotMyBid(bids, myBidsSorted))
log.info("lowestNotMyAsk: " + lowestNotMyAsk(asks, myAsksSorted))
if(operation == "both" || operation == "sell")
processAsks()
if(operation == "both" || operation == "buy")
processBids()
}
} | wlk/xbot | src/main/scala/com/wlangiewicz/xbot/Bot.scala | Scala | apache-2.0 | 6,536 |
package modules
import models.{CategoryProblem, FoodProblem, RecursiveCategoryProblems}
import uk.ac.ncl.openlab.intake24.errors.LocalLookupError
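/**
 * Service for checking foods and categories in a locale for data problems.
 */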
trait ProblemCheckerService {
def enablePrecacheWarnings(): Unit
def disablePrecacheWarnings(): Unit
def getFoodProblems(code: String, locale: String): Either[LocalLookupError, Seq[FoodProblem]]
def getCategoryProblems(code: String, locale: String): Either[LocalLookupError, Seq[CategoryProblem]]
def getRecursiveCategoryProblems(code: String, locale: String, maxReturnedProblems: Int): Either[LocalLookupError, RecursiveCategoryProblems]
}
| digitalinteraction/intake24 | ApiPlayServer/app/modules/ProblemCheckerService.scala | Scala | apache-2.0 | 601 |
/**
* Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.impl.providers.oauth1.secrets
import com.mohiva.play.silhouette.api.util.{ Clock, ExtractableRequest }
import com.mohiva.play.silhouette.impl.exceptions.OAuth1TokenSecretException
import com.mohiva.play.silhouette.impl.providers.oauth1.secrets.CookieSecretProvider._
import com.mohiva.play.silhouette.impl.providers.{ OAuth1Info, OAuth1TokenSecret, OAuth1TokenSecretProvider }
import org.joda.time.DateTime
import play.api.Play
import play.api.Play.current
import play.api.libs.Crypto
import play.api.libs.json.Json
import play.api.mvc.{ Cookie, Result }
import scala.concurrent.Future
import scala.util.{ Failure, Success, Try }
/**
* The cookie secret companion object.
*/
object CookieSecret {
/**
   * Converts the [[CookieSecret]] to Json and vice versa.
*/
implicit val jsonFormat = Json.format[CookieSecret]
/**
* Returns a serialized value of the secret.
*
* @param secret The secret to serialize.
* @return A serialized value of the secret.
*/
def serialize(secret: CookieSecret) = Crypto.encryptAES(Json.toJson(secret).toString())
/**
* Unserializes the secret.
*
* @param str The string representation of the secret.
   * @return A Success containing the secret, otherwise a Failure.
*/
def unserialize(str: String): Try[CookieSecret] = {
Try(Json.parse(Crypto.decryptAES(str))) match {
case Success(json) => json.validate[CookieSecret].asEither match {
case Left(error) => Failure(new OAuth1TokenSecretException(InvalidSecretFormat.format(error)))
case Right(authenticator) => Success(authenticator)
}
case Failure(error) => Failure(new OAuth1TokenSecretException(InvalidSecretFormat.format(error)))
}
}
}
/**
* A token secret which gets persisted in a cookie.
*
* @param value The token secret.
* @param expirationDate The expiration time.
*/
case class CookieSecret(value: String, expirationDate: DateTime) extends OAuth1TokenSecret {
/**
* Checks if the secret is expired. This is an absolute timeout since the creation of
* the secret.
*
* @return True if the secret is expired, false otherwise.
*/
def isExpired = expirationDate.isBeforeNow
/**
* Returns a serialized value of the secret.
*
* @return A serialized value of the secret.
*/
def serialize = CookieSecret.serialize(this)
}
/**
* Saves the secret in a cookie.
*
* @param settings The secret settings.
* @param clock The clock implementation.
*/
class CookieSecretProvider(
settings: CookieSecretSettings,
clock: Clock) extends OAuth1TokenSecretProvider {
/**
* The type of the secret implementation.
*/
type Secret = CookieSecret
/**
* Builds the secret from OAuth info.
*
* @param info The OAuth info returned from the provider.
* @param request The current request.
* @tparam B The type of the request body.
   * @return The built secret.
*/
def build[B](info: OAuth1Info)(implicit request: ExtractableRequest[B]): Future[CookieSecret] = {
Future.successful(CookieSecret(info.secret, clock.now.plusSeconds(settings.expirationTime)))
}
/**
* Retrieves the token secret.
*
* @param request The current request.
* @tparam B The type of the request body.
   * @return A secret on success, otherwise a failure.
*/
def retrieve[B](implicit request: ExtractableRequest[B]): Future[Secret] = {
request.cookies.get(settings.cookieName) match {
case Some(cookie) => CookieSecret.unserialize(cookie.value) match {
case Success(secret) if secret.isExpired => Future.failed(new OAuth1TokenSecretException(SecretIsExpired))
case Success(secret) => Future.successful(secret)
case Failure(error) => Future.failed(error)
}
case None => Future.failed(new OAuth1TokenSecretException(ClientSecretDoesNotExists.format(settings.cookieName)))
}
}
/**
* Publishes the secret to the client.
*
* @param result The result to send to the client.
* @param secret The secret to publish.
* @param request The current request.
* @tparam B The type of the request body.
* @return The result to send to the client.
*/
def publish[B](result: Result, secret: CookieSecret)(implicit request: ExtractableRequest[B]) = {
result.withCookies(Cookie(name = settings.cookieName,
value = secret.serialize,
maxAge = Some(settings.expirationTime),
path = settings.cookiePath,
domain = settings.cookieDomain,
secure = settings.secureCookie,
httpOnly = settings.httpOnlyCookie))
}
}
/**
* The CookieSecretProvider companion object.
*/
object CookieSecretProvider {
/**
* The error messages.
*/
val ClientSecretDoesNotExists = "[Silhouette][CookieSecretProvider] Secret cookie doesn't exists for name: %s"
val SecretIsExpired = "[Silhouette][CookieSecretProvider] Secret is expired"
val InvalidSecretFormat = "[Silhouette][CookieSecretProvider] Cannot build token secret because of invalid Json format: %s"
}
/**
* The settings for the cookie secret.
*
* @param cookieName The cookie name.
* @param cookiePath The cookie path.
* @param cookieDomain The cookie domain.
* @param secureCookie Whether this cookie is secured, sent only for HTTPS requests.
* @param httpOnlyCookie Whether this cookie is HTTP only, i.e. not accessible from client-side JavaScript code.
* @param expirationTime Secret expiration. Defaults to 5 minutes which provides sufficient time to log in, but
* not too much. This is a balance between convenience and security.
*/
case class CookieSecretSettings(
cookieName: String = "OAuth1TokenSecret",
cookiePath: String = "/",
cookieDomain: Option[String] = None,
secureCookie: Boolean = Play.isProd, // Default to sending only for HTTPS in production, but not for development and test.
httpOnlyCookie: Boolean = true,
expirationTime: Int = 5 * 60)
| rfranco/play-silhouette | silhouette/app/com/mohiva/play/silhouette/impl/providers/oauth1/secrets/CookieSecret.scala | Scala | apache-2.0 | 6,548 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.http.finagle
import com.twitter.finagle.http.Request
import wvlet.airframe.codec.{MessageCodec, MessageContext}
import wvlet.airframe.control.Control
import wvlet.airframe.http.{Endpoint, RPC, Router}
import wvlet.airframe.msgpack.spi.{Packer, Unpacker}
import wvlet.airframe.surface.Surface
import wvlet.airspec.AirSpec
import wvlet.log.LogSupport
/**
*/
object CustomCodecTest extends AirSpec {
sealed trait Suit
case object Spade extends Suit
case object Heart extends Suit
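  // A custom codec that (de)serializes Suit case objects as plain strings.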
object SuitCodec extends MessageCodec[Suit] {
override def pack(
p: Packer,
v: Suit
): Unit = {
p.packString(v.toString)
}
override def unpack(
u: Unpacker,
v: MessageContext
): Unit = {
u.unpackString match {
case "Spade" => v.setObject(Spade)
case "Heart" => v.setObject(Heart)
case other => v.setError(new IllegalArgumentException(s"Unknown suit: ${other}"))
}
}
}
class MyApi extends LogSupport {
@Endpoint(path = "/hello")
def hello(suit: Suit): String = {
suit.toString
}
}
test(
s"custom codec",
design = Finagle.server
.withRouter(Router.add[MyApi])
.withCustomCodec(Map(Surface.of[Suit] -> SuitCodec))
.design
.add(Finagle.client.syncClientDesign)
) { client: FinagleSyncClient =>
client.send(Request("/hello?suit=Spade")).contentString shouldBe "Spade"
client.send(Request("/hello?suit=Heart")).contentString shouldBe "Heart"
}
}
| wvlet/airframe | airframe-http-finagle/src/test/scala/wvlet/airframe/http/finagle/CustomCodecTest.scala | Scala | apache-2.0 | 2,087 |
/**
* Copyright 2015, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.params.exceptions
case class OutOfRangeException(name: String, value: Double, lowerBound: Double, upperBound: Double)
extends ValidationException(s"Parameter '$name' value is out of range. " +
s"Value `$value` is not in [$lowerBound; $upperBound]")
| deepsense-io/seahorse-workflow-executor | deeplang/src/main/scala/io/deepsense/deeplang/params/exceptions/OutOfRangeException.scala | Scala | apache-2.0 | 885 |
package play.api.libs.ws
import java.io.File
import scala.concurrent.{Future, Promise}
import play.api.libs.iteratee._
import play.api.libs.iteratee.Input._
import play.api.http.{ Writeable, ContentTypeOf }
import com.ning.http.client.{
AsyncHttpClient,
AsyncHttpClientConfig,
RequestBuilderBase,
FluentCaseInsensitiveStringsMap,
HttpResponseBodyPart,
HttpResponseHeaders,
HttpResponseStatus,
Response => AHCResponse,
PerRequestConfig
}
import collection.immutable.TreeMap
import play.core.utils.CaseInsensitiveOrdered
import com.ning.http.util.AsyncHttpProviderUtils
/**
 * Asynchronous API to query web services, as an HTTP client.
*
* Usage example:
* {{{
* WS.url("http://example.com/feed").get()
* WS.url("http://example.com/item").post("content")
* }}}
*
* The value returned is a Future[Response],
* and you should use Play's asynchronous mechanisms to use this response.
*
*/
object WS {
import com.ning.http.client.Realm.{ AuthScheme, RealmBuilder }
import javax.net.ssl.SSLContext
private var clientHolder: Option[AsyncHttpClient] = None
/**
* resets the underlying AsyncHttpClient
*/
def resetClient(): Unit = {
clientHolder.map { clientRef =>
clientRef.close()
}
clientHolder = None
}
/**
   * Retrieves or creates the underlying HTTP client.
*/
def client =
clientHolder.getOrElse {
val playConfig = play.api.Play.maybeApplication.map(_.configuration)
val asyncHttpConfig = new AsyncHttpClientConfig.Builder()
.setConnectionTimeoutInMs(playConfig.flatMap(_.getMilliseconds("ws.timeout")).getOrElse(120000L).toInt)
.setRequestTimeoutInMs(playConfig.flatMap(_.getMilliseconds("ws.timeout")).getOrElse(120000L).toInt)
.setFollowRedirects(playConfig.flatMap(_.getBoolean("ws.followRedirects")).getOrElse(true))
.setUseProxyProperties(playConfig.flatMap(_.getBoolean("ws.useProxyProperties")).getOrElse(true))
playConfig.flatMap(_.getString("ws.useragent")).map { useragent =>
asyncHttpConfig.setUserAgent(useragent)
}
if (playConfig.flatMap(_.getBoolean("ws.acceptAnyCertificate")).getOrElse(false) == false) {
asyncHttpConfig.setSSLContext(SSLContext.getDefault)
}
val innerClient = new AsyncHttpClient(asyncHttpConfig.build())
clientHolder = Some(innerClient)
innerClient
}
/**
* Prepare a new request. You can then construct it by chaining calls.
*
* @param url the URL to request
*/
def url(url: String): WSRequestHolder = WSRequestHolder(url, Map(), Map(), None, None, None, None, None)
/**
* A WS Request.
*/
class WSRequest(_method: String, _auth: Option[Tuple3[String, String, AuthScheme]], _calc: Option[SignatureCalculator]) extends RequestBuilderBase[WSRequest](classOf[WSRequest], _method, false) {
import scala.collection.JavaConverters._
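    /** The request body set so far, or the empty string if none was set. */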
def getStringData = body.getOrElse("")
protected var body: Option[String] = None
override def setBody(s: String) = { this.body = Some(s); super.setBody(s)}
protected var calculator: Option[SignatureCalculator] = _calc
protected var headers: Map[String, Seq[String]] = Map()
protected var _url: String = null
    // auth() mutates the underlying Java builder; getOrElse({}) keeps the overall result type Unit
_auth.map(data => auth(data._1, data._2, data._3)).getOrElse({})
/**
* Add http auth headers. Defaults to HTTP Basic.
*/
private def auth(username: String, password: String, scheme: AuthScheme = AuthScheme.BASIC): WSRequest = {
this.setRealm((new RealmBuilder())
.setScheme(scheme)
.setPrincipal(username)
.setPassword(password)
.setUsePreemptiveAuth(true)
.build())
}
/**
* Return the current headers of the request being constructed
*/
def allHeaders: Map[String, Seq[String]] = {
mapAsScalaMapConverter(request.asInstanceOf[com.ning.http.client.Request].getHeaders()).asScala.map(e => e._1 -> e._2.asScala.toSeq).toMap
}
/**
* Return the current query string parameters
*/
def queryString: Map[String, Seq[String]] = {
mapAsScalaMapConverter(request.asInstanceOf[com.ning.http.client.Request].getParams()).asScala.map(e => e._1 -> e._2.asScala.toSeq).toMap
}
/**
* Retrieve an HTTP header.
*/
def header(name: String): Option[String] = headers.get(name).flatMap(_.headOption)
/**
* The HTTP method.
*/
def method: String = _method
/**
* The URL
*/
def url: String = _url
private def ningHeadersToMap(headers: java.util.Map[String, java.util.Collection[String]]) =
mapAsScalaMapConverter(headers).asScala.map(e => e._1 -> e._2.asScala.toSeq).toMap
private def ningHeadersToMap(headers: FluentCaseInsensitiveStringsMap) = {
val res = mapAsScalaMapConverter(headers).asScala.map(e => e._1 -> e._2.asScala.toSeq).toMap
      //todo: wrap the case insensitive ning map instead of creating a new one (unless perhaps immutability is important)
TreeMap(res.toSeq: _*)(CaseInsensitiveOrdered)
}
private[libs] def execute: Future[Response] = {
import com.ning.http.client.AsyncCompletionHandler
      val result = Promise[Response]()
calculator.map(_.sign(this))
WS.client.executeRequest(this.build(), new AsyncCompletionHandler[AHCResponse]() {
override def onCompleted(response: AHCResponse) = {
result.success(Response(response))
response
}
override def onThrowable(t: Throwable) = {
result.failure(t)
}
})
result.future
}
/**
* Set an HTTP header.
*/
override def setHeader(name: String, value: String) = {
headers = headers + (name -> List(value))
super.setHeader(name, value)
}
/**
* Add an HTTP header (used for headers with multiple values).
*/
override def addHeader(name: String, value: String) = {
headers = headers + (name -> (headers.get(name).getOrElse(List()) :+ value))
super.addHeader(name, value)
}
/**
* Defines the request headers.
*/
override def setHeaders(hdrs: FluentCaseInsensitiveStringsMap) = {
headers = ningHeadersToMap(hdrs)
super.setHeaders(hdrs)
}
/**
* Defines the request headers.
*/
override def setHeaders(hdrs: java.util.Map[String, java.util.Collection[String]]) = {
headers = ningHeadersToMap(hdrs)
super.setHeaders(hdrs)
}
/**
* Defines the request headers.
*/
def setHeaders(hdrs: Map[String, Seq[String]]) = {
headers = hdrs
hdrs.foreach(header => header._2.foreach(value =>
super.addHeader(header._1, value)
))
this
}
/**
* Defines the query string.
*/
def setQueryString(queryString: Map[String, Seq[String]]) = {
for ((key, values) <- queryString; value <- values) {
this.addQueryParameter(key, value)
}
this
}
/**
* Defines the URL.
*/
override def setUrl(url: String) = {
_url = url
super.setUrl(url)
}
private[libs] def executeStream[A](consumer: ResponseHeaders => Iteratee[Array[Byte], A]): Future[Iteratee[Array[Byte], A]] = {
import com.ning.http.client.AsyncHandler
var doneOrError = false
calculator.map(_.sign(this))
var statusCode = 0
val iterateeP = Promise[Iteratee[Array[Byte], A]]()
var iteratee: Iteratee[Array[Byte], A] = null
WS.client.executeRequest(this.build(), new AsyncHandler[Unit]() {
import com.ning.http.client.AsyncHandler.STATE
override def onStatusReceived(status: HttpResponseStatus) = {
statusCode = status.getStatusCode()
STATE.CONTINUE
}
override def onHeadersReceived(h: HttpResponseHeaders) = {
val headers = h.getHeaders()
iteratee = consumer(ResponseHeaders(statusCode, ningHeadersToMap(headers)))
STATE.CONTINUE
}
override def onBodyPartReceived(bodyPart: HttpResponseBodyPart) = {
if (!doneOrError) {
iteratee = iteratee.pureFlatFold {
case Step.Done(a, e) => {
doneOrError = true
val it = Done(a, e)
iterateeP.success(it)
it
}
case Step.Cont(k) => {
k(El(bodyPart.getBodyPartBytes()))
}
case Step.Error(e, input) => {
doneOrError = true
val it = Error(e, input)
iterateeP.success(it)
it
}
}
STATE.CONTINUE
} else {
iteratee = null
// Must close underlying connection, otherwise async http client will drain the stream
bodyPart.closeUnderlyingConnection()
STATE.ABORT
}
}
override def onCompleted() = {
Option(iteratee).map(iterateeP.success(_))
}
override def onThrowable(t: Throwable) = {
iterateeP.failure(t)
}
})
iterateeP.future
}
}
/**
* A WS Request builder.
*/
case class WSRequestHolder(url: String,
headers: Map[String, Seq[String]],
queryString: Map[String, Seq[String]],
calc: Option[SignatureCalculator],
auth: Option[Tuple3[String, String, AuthScheme]],
followRedirects: Option[Boolean],
timeout: Option[Int],
virtualHost: Option[String]) {
/**
     * Sets the signature calculator for the request.
     * @param calc the signature calculator to use (e.g. for OAuth request signing)
*/
def sign(calc: SignatureCalculator): WSRequestHolder = this.copy(calc = Some(calc))
/**
     * Sets the authentication realm.
*/
def withAuth(username: String, password: String, scheme: AuthScheme): WSRequestHolder =
this.copy(auth = Some((username, password, scheme)))
/**
     * Adds any number of HTTP headers.
     * @param hdrs the headers to add
*/
def withHeaders(hdrs: (String, String)*): WSRequestHolder = {
val headers = hdrs.foldLeft(this.headers)((m, hdr) =>
if (m.contains(hdr._1)) m.updated(hdr._1, m(hdr._1) :+ hdr._2)
else (m + (hdr._1 -> Seq(hdr._2)))
)
this.copy(headers = headers)
}
/**
     * Adds any number of query string parameters to the request.
*/
def withQueryString(parameters: (String, String)*): WSRequestHolder =
this.copy(queryString = parameters.foldLeft(queryString) {
case (m, (k, v)) => m + (k -> (v +: m.get(k).getOrElse(Nil)))
})
/**
* Sets whether redirects (301, 302) should be followed automatically
*/
def withFollowRedirects(follow: Boolean): WSRequestHolder =
this.copy(followRedirects = Some(follow))
/**
* Sets the request timeout in milliseconds
*/
def withTimeout(timeout: Int): WSRequestHolder =
this.copy(timeout = Some(timeout))
def withVirtualHost(vh: String): WSRequestHolder = {
this.copy(virtualHost = Some(vh))
}
/**
     * Performs a GET request.
*/
def get(): Future[Response] = prepare("GET").execute
/**
     * Performs a GET request and streams the response body.
     * @param consumer the iteratee handling the streamed response
*/
def get[A](consumer: ResponseHeaders => Iteratee[Array[Byte], A]): Future[Iteratee[Array[Byte], A]] =
prepare("GET").executeStream(consumer)
/**
* Perform a POST on the request asynchronously.
*/
def post[T](body: T)(implicit wrt: Writeable[T], ct: ContentTypeOf[T]): Future[Response] = prepare("POST", body).execute
/**
* Perform a POST on the request asynchronously.
* Request body won't be chunked
*/
def post(body: File): Future[Response] = prepare("POST", body).execute
/**
     * Performs a POST with the supplied body and streams the response.
     * @param consumer the iteratee handling the streamed response
*/
def postAndRetrieveStream[A, T](body: T)(consumer: ResponseHeaders => Iteratee[Array[Byte], A])(implicit wrt: Writeable[T], ct: ContentTypeOf[T]): Future[Iteratee[Array[Byte], A]] = prepare("POST", body).executeStream(consumer)
/**
* Perform a PUT on the request asynchronously.
*/
def put[T](body: T)(implicit wrt: Writeable[T], ct: ContentTypeOf[T]): Future[Response] = prepare("PUT", body).execute
/**
* Perform a PUT on the request asynchronously.
* Request body won't be chunked
*/
def put(body: File): Future[Response] = prepare("PUT", body).execute
/**
     * Performs a PUT with the supplied body and streams the response.
     * @param consumer the iteratee handling the streamed response
*/
def putAndRetrieveStream[A, T](body: T)(consumer: ResponseHeaders => Iteratee[Array[Byte], A])(implicit wrt: Writeable[T], ct: ContentTypeOf[T]): Future[Iteratee[Array[Byte], A]] = prepare("PUT", body).executeStream(consumer)
/**
* Perform a DELETE on the request asynchronously.
*/
def delete(): Future[Response] = prepare("DELETE").execute
/**
* Perform a HEAD on the request asynchronously.
*/
def head(): Future[Response] = prepare("HEAD").execute
/**
* Perform a OPTIONS on the request asynchronously.
*/
def options(): Future[Response] = prepare("OPTIONS").execute
/**
* Execute an arbitrary method on the request asynchronously.
*
* @param method The method to execute
*/
def execute(method: String): Future[Response] = prepare(method).execute
private[play] def prepare(method: String) = {
val request = new WSRequest(method, auth, calc).setUrl(url)
.setHeaders(headers)
.setQueryString(queryString)
followRedirects.map(request.setFollowRedirects(_))
timeout.map { t: Int =>
val config = new PerRequestConfig()
config.setRequestTimeoutInMs(t)
request.setPerRequestConfig(config)
}
virtualHost.map { v =>
request.setVirtualHost(v)
}
request
}
private[play] def prepare(method: String, body: File) = {
import com.ning.http.client.generators.FileBodyGenerator
      val bodyGenerator = new FileBodyGenerator(body)
val request = new WSRequest(method, auth, calc).setUrl(url)
.setHeaders(headers)
.setQueryString(queryString)
.setBody(bodyGenerator)
followRedirects.map(request.setFollowRedirects(_))
timeout.map { t: Int =>
val config = new PerRequestConfig()
config.setRequestTimeoutInMs(t)
request.setPerRequestConfig(config)
}
virtualHost.map { v =>
request.setVirtualHost(v)
}
request
}
private[play] def prepare[T](method: String, body: T)(implicit wrt: Writeable[T], ct: ContentTypeOf[T]) = {
val request = new WSRequest(method, auth, calc).setUrl(url)
.setHeaders(Map("Content-Type" -> Seq(ct.mimeType.getOrElse("text/plain"))) ++ headers)
.setQueryString(queryString)
.setBody(wrt.transform(body))
followRedirects.map(request.setFollowRedirects(_))
timeout.map { t: Int =>
val config = new PerRequestConfig()
config.setRequestTimeoutInMs(t)
request.setPerRequestConfig(config)
}
virtualHost.map { v =>
request.setVirtualHost(v)
}
request
}
}
}
/**
* A WS HTTP response.
*/
case class Response(ahcResponse: AHCResponse) {
import scala.xml._
import play.api.libs.json._
/**
* Get the underlying response object.
*/
def getAHCResponse = ahcResponse
/**
* The response status code.
*/
def status: Int = ahcResponse.getStatusCode()
/**
* The response status message.
*/
def statusText: String = ahcResponse.getStatusText()
/**
* Get a response header.
*/
def header(key: String): Option[String] = Option(ahcResponse.getHeader(key))
/**
* The response body as String.
*/
lazy val body: String = {
// RFC-2616#3.7.1 states that any text/* mime type should default to ISO-8859-1 charset if not
    // explicitly set, while Play's default encoding is UTF-8. So, use UTF-8 if charset is not explicitly
// set and content type is not text/*, otherwise default to ISO-8859-1
val contentType = Option(ahcResponse.getContentType).getOrElse("application/octet-stream")
val charset = Option(AsyncHttpProviderUtils.parseCharset(contentType)).getOrElse {
if (contentType.startsWith("text/"))
AsyncHttpProviderUtils.DEFAULT_CHARSET
else
"utf-8"
}
ahcResponse.getResponseBody(charset)
}
/**
* The response body as Xml.
*/
lazy val xml: Elem = XML.loadString(body)
/**
* The response body as Json.
*/
lazy val json: JsValue = Json.parse(ahcResponse.getResponseBodyAsBytes)
}
/**
* An HTTP response header (the body has not been retrieved yet)
*/
case class ResponseHeaders(status: Int, headers: Map[String, Seq[String]])
/**
* Sign a WS call.
*/
trait SignatureCalculator {
/**
   * Signs the given request, e.g. by adding authorization headers or parameters.
*/
def sign(request: WS.WSRequest)
}
| noel-yap/setter-for-catan | play-2.1.1/framework/src/play/src/main/scala/play/api/libs/ws/WS.scala | Scala | apache-2.0 | 17,063 |
package uk.gov.dvla.vehicles.presentation.common.model
import play.api.libs.json.Json
import uk.gov.dvla.vehicles.presentation.common.views.models.{AddressLinesViewModel, AddressAndPostcodeViewModel}
object VmAddressModel {
implicit val JsonFormat = Json.format[AddressModel]
def from(address: AddressAndPostcodeViewModel): AddressModel =
AddressModel(address = joinAddressesIfNeeded(address.toViewFormat))
def from(addressString: String): AddressModel =
AddressModel(address = joinAddressesIfNeeded(addressString.split(",") map (line => line.trim)))
private def countAllowedLineCharacters(s: String) = s.count(_.isLetter)
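  // If the first line has too few letters to be a valid building name or number,
  // merge it with the next line and re-check.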
private def joinAddressesIfNeeded(addresses: Seq[String]): Seq[String] = addresses.toList match {
case head :: second :: tail if countAllowedLineCharacters(head) < AddressLinesViewModel.Form.BuildingNameOrNumberMinLength =>
joinAddressesIfNeeded(s"$head $second" :: tail)
case _ => addresses
}
} | dvla/vehicles-presentation-common | app/uk/gov/dvla/vehicles/presentation/common/model/VmAddressModel.scala | Scala | mit | 961 |
package be.wegenenverkeer.atomium.server
import be.wegenenverkeer.atomium.format.{Url, Link}
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FunSuite, Matchers}
import scala.collection.JavaConverters._
class AbstractFeedPageStoreTest extends FunSuite with FeedStoreTestSupport with Matchers with BeforeAndAfterAll with BeforeAndAfterEach {
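  // A no-op context; the feed store operations take an implicit Context.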
private implicit val context: Context = new Context {}
test("empty store") {
val feedStore = new TestFeedStore[Int, Context]()
feedStore.getHeadOfFeed(1).getEntries.size shouldBe 0
feedStore.getHeadOfFeed(3).getEntries.size shouldBe 0
intercept[IllegalArgumentException] {
feedStore.getHeadOfFeed(0)
}
feedStore.getFeed(0, 2, forward = true).get.getEntries.size shouldBe 0
feedStore.getFeed(10, 2, forward = false) shouldBe None
intercept[IllegalArgumentException] {
feedStore.getFeed(10, 0, forward = false)
}
}
test("invalid feed retrieval") {
val feedStore = new TestFeedStore[Int, Context]()
feedStore.push(1) //stored with sequence number 1
feedStore.push(2) //stored with sequence number 2
feedStore.push(3) //stored with sequence number 3
//pageSize = 1
feedStore.getFeed(0, 1, forward = true) shouldNot be(None)
feedStore.getFeed(1, 1, forward = true) shouldNot be(None)
feedStore.getFeed(2, 1, forward = true) shouldNot be(None)
feedStore.getFeed(3, 1, forward = true) should be(None)
feedStore.getFeed(4, 1, forward = false) should be(None)
//can only retrieve two pages by navigating backwards
feedStore.getFeed(3, 1, forward = false) shouldNot be(None)
feedStore.getFeed(2, 1, forward = false) shouldNot be(None)
feedStore.getFeed(1, 1, forward = false) should be(None)
feedStore.getFeed(0, 1, forward = false) should be(None)
//pageSize = 2
feedStore.getFeed(0, 2, forward = true) shouldNot be(None)
feedStore.getFeed(1, 2, forward = true) should be(None)
feedStore.getFeed(2, 2, forward = true) shouldNot be(None)
feedStore.getFeed(3, 2, forward = true) should be(None)
feedStore.getFeed(4, 2, forward = true) should be(None)
feedStore.getFeed(5, 2, forward = false) should be(None)
feedStore.getFeed(4, 2, forward = false) should be(None)
//can only retrieve last page by navigating backwards
feedStore.getFeed(3, 2, forward = false) shouldNot be(None)
feedStore.getFeed(2, 2, forward = false) should be(None)
feedStore.getFeed(1, 2, forward = false) should be(None)
feedStore.getFeed(0, 2, forward = false) should be(None)
//pageSize = 3
feedStore.getFeed(0, 3, forward = true) shouldNot be(None)
feedStore.getFeed(1, 3, forward = true) should be(None)
feedStore.getFeed(2, 3, forward = true) should be(None)
    feedStore.getFeed(3, 3, forward = true) should be(None) //feed contains only a single page
//can not retrieve any pages by navigating backwards
feedStore.getFeed(4, 3, forward = false) should be(None)
feedStore.getFeed(3, 3, forward = false) should be(None)
feedStore.getFeed(2, 3, forward = false) should be(None)
feedStore.getFeed(1, 3, forward = false) should be(None)
feedStore.getFeed(0, 3, forward = false) should be(None)
//pageSize = 4
feedStore.getFeed(0, 4, forward = true) shouldNot be(None)
feedStore.getFeed(1, 4, forward = true) should be(None)
feedStore.getFeed(2, 4, forward = true) should be(None)
feedStore.getFeed(3, 4, forward = true) should be(None)
feedStore.getFeed(4, 4, forward = true) should be(None)
//can not retrieve any pages by navigating backwards
feedStore.getFeed(5, 4, forward = false) should be(None)
feedStore.getFeed(4, 4, forward = false) should be(None)
feedStore.getFeed(3, 4, forward = false) should be(None)
feedStore.getFeed(2, 4, forward = false) should be(None)
feedStore.getFeed(1, 4, forward = false) should be(None)
}
test("store with consecutive sequence numbers") {
testFeedStorePaging(feedStore = new TestFeedStore[String, Context], pageSize = 5)
}
test("store with missing non-consecutive sequence numbers") {
val feedStore = new TestFeedStore[Int, Context]()
feedStore.push(1) //stored with sequence number 1
feedStore.sequenceNumbersToSkipForPush(1)
feedStore.push(2) //stored with sequence number 3
feedStore.push(3) //stored with sequence number 4
feedStore.sequenceNumbersToSkipForPush(3)
feedStore.push(4) //stored with sequence number 8
feedStore.push(5) //stored with sequence number 9
//move forwards with pageSize 5 from tail
val lastPageOfFeedWithSize5 = feedStore.getFeed(0, 5, forward = true).get
lastPageOfFeedWithSize5.complete shouldBe false //there is no previous feed page (yet)
lastPageOfFeedWithSize5.getEntries.size should be(5)
lastPageOfFeedWithSize5.getEntries.asScala.map( _.getContent.getValue ) should be(List(5, 4, 3, 2, 1))
lastPageOfFeedWithSize5.previousLink.asScala shouldBe None
lastPageOfFeedWithSize5.nextLink.asScala shouldBe None
lastPageOfFeedWithSize5.selfLink shouldBe (new Link(Link.SELF, "0/forward/5"))
//since only 1 page in feed => head equals last
feedStore.getHeadOfFeed(5) shouldEqual lastPageOfFeedWithSize5
feedStore.getFeed(9, 5, forward = true) shouldBe None
//move forwards with pageSize 2 from tail
val lastPageOfFeedWithSize2 = feedStore.getFeed(0, 2, forward = true).get
lastPageOfFeedWithSize2.complete shouldBe true
lastPageOfFeedWithSize2.getEntries.size should be(2)
lastPageOfFeedWithSize2.getEntries.asScala.map( _.getContent.getValue ) should be(List(2, 1))
lastPageOfFeedWithSize2.previousLink.asScala should be(Some(new Link(Link.PREVIOUS, "3/forward/2")))
lastPageOfFeedWithSize2.nextLink.asScala shouldBe None
lastPageOfFeedWithSize2.selfLink should be( new Link(Link.SELF, "0/forward/2"))
//moving forward => previous page
val middlePageOfFeedWithSize2 = feedStore.getFeed(3, 2, forward = true).get
middlePageOfFeedWithSize2.complete shouldBe true
middlePageOfFeedWithSize2.getEntries.size should be(2)
middlePageOfFeedWithSize2.getEntries.asScala.map( _.getContent.getValue ) should be(List(4, 3))
middlePageOfFeedWithSize2.previousLink.asScala should be(Some(new Link(Link.PREVIOUS, "8/forward/2")))
middlePageOfFeedWithSize2.nextLink.asScala should be(Some(new Link(Link.NEXT, "4/backward/2")))
middlePageOfFeedWithSize2.selfLink should be(new Link(Link.SELF, "3/forward/2"))
//moving forward => previous page
val firstPageOfFeedWithSize2 = feedStore.getFeed(8, 2, forward = true).get
firstPageOfFeedWithSize2.complete shouldBe false
firstPageOfFeedWithSize2.getEntries.size should be(1)
firstPageOfFeedWithSize2.getEntries.asScala.map( _.getContent.getValue ) should be(List(5))
firstPageOfFeedWithSize2.previousLink.asScala shouldBe None
firstPageOfFeedWithSize2.nextLink.asScala should be(Some(new Link(Link.NEXT, "9/backward/2")))
firstPageOfFeedWithSize2.selfLink should be(new Link(Link.SELF, "8/forward/2"))
//we are at the head of the feed
feedStore.getHeadOfFeed(2) shouldEqual firstPageOfFeedWithSize2
//moving backwards
feedStore.getFeed(9, 2, forward = false).get shouldEqual middlePageOfFeedWithSize2
feedStore.getFeed(4, 2, forward = false).get shouldEqual lastPageOfFeedWithSize2
}
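  // Link-format note (an inference from the assertions above, not a documented contract):
  // self/previous/next hrefs read "<sequenceNumber>/<forward|backward>/<pageSize>", e.g.
  // "3/forward/2" pages two entries forward starting after sequence number 3.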
}
| kwark/atomium | modules/server/src/test/scala/be/wegenenverkeer/atomium/server/AbstractFeedPageStoreTest.scala | Scala | mit | 7,397 |
package scalan.it.lms
import scalan.common.{MetaTestsDslExp, SegmentsDslExp}
import scalan.compilation.lms.cxx.LmsCompilerCxx
import scalan.compilation.lms.scalac.LmsCompilerScala
import scalan.compilation.lms.uni.LmsCompilerUni
import scalan.primitives.{StructExamples, StructItTests}
import scalan.{JNIExtractorOpsExp, ScalanDslExp}
class LmsStructItTests extends StructItTests {
class ProgExp extends ScalanDslExp with StructExamples with SegmentsDslExp with JNIExtractorOpsExp with MetaTestsDslExp
val progStaged = new LmsCompilerScala(new ProgExp)
val progStagedCxx = new LmsCompilerCxx(new ProgExp)
val progStagedU = new LmsCompilerUni(new ProgExp)
val defaultCompilers = compilers(progStaged, progStagedU)
val cxxOnly = compilers(progStagedCxx)
import progStd._
test("Generate CPP source") {
compileSource(s => s.t1.asInstanceOf[s.Rep[Int => Struct]], cxxOnly)
compileSource(s => s.singleFieldStructIn.asInstanceOf[s.Rep[Struct => Int]], cxxOnly)
compileSource(s => s.structInOut.asInstanceOf[s.Rep[Struct => Struct]], cxxOnly)
compileSource(_.structInside, cxxOnly)
}
}
| scalan/scalan | lms-backend/core/src/it/scala/scalan/it/lms/LmsStructItTests.scala | Scala | apache-2.0 | 1,124 |
/**
* Copyright (c) 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.trustedanalytics.sparktk.frame.internal.ops.statistics.covariance
import org.trustedanalytics.sparktk.frame.DataTypes.vector
import org.trustedanalytics.sparktk.frame.internal.{ FrameState, FrameSummarization, BaseFrame }
import org.trustedanalytics.sparktk.frame.{ SchemaHelper, DataTypes, Frame }
trait CovarianceMatrixSummarization extends BaseFrame {
/**
* Calculate covariance matrix for two or more columns.
*
* @note This function applies only to columns containing numerical data.
*
   * @param dataColumnNames The names of the columns from which to compute the matrix. Names should refer
* to a single column of type vector, or two or more columns of numeric scalars.
* @return A matrix with the covariance values for the columns.
*/
def covarianceMatrix(dataColumnNames: List[String]): Frame = {
execute(CovarianceMatrix(dataColumnNames))
}
}
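// A minimal usage sketch (hedged; the frame and column names below are hypothetical):
//   val frame: Frame = ...                                   // frame with numeric columns
//   val cov: Frame = frame.covarianceMatrix(List("a", "b"))  // float64 covariance matrix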
case class CovarianceMatrix(dataColumnNames: List[String]) extends FrameSummarization[Frame] {
require(!dataColumnNames.contains(null), "data columns names cannot be null")
require(dataColumnNames.forall(!_.equals("")), "data columns names cannot be empty")
override def work(state: FrameState): Frame = {
state.schema.requireColumnsAreVectorizable(dataColumnNames)
// compute covariance
val outputColumnDataType = state.schema.columnDataType(dataColumnNames.head)
val outputVectorLength: Option[Long] = outputColumnDataType match {
case vector(length) => Some(length)
case _ => None
}
val covarianceRdd = CovarianceFunctions.covarianceMatrix(state, dataColumnNames, outputVectorLength)
val outputSchema = SchemaHelper.create(dataColumnNames, DataTypes.float64, outputVectorLength)
new Frame(covarianceRdd, outputSchema)
}
}
| trustedanalytics/spark-tk | sparktk-core/src/main/scala/org/trustedanalytics/sparktk/frame/internal/ops/statistics/covariance/CovarianceMatrix.scala | Scala | apache-2.0 | 2,501 |
package spark.network
import java.nio.ByteBuffer
import scala.collection.mutable.ArrayBuffer
private[network]
class MessageChunk(val header: MessageChunkHeader, val buffer: ByteBuffer) {
val size = if (buffer == null) 0 else buffer.remaining
lazy val buffers = {
val ab = new ArrayBuffer[ByteBuffer]()
ab += header.buffer
if (buffer != null) {
ab += buffer
}
ab
}
override def toString = {
"" + this.getClass.getSimpleName + " (id = " + header.id + ", size = " + size + ")"
}
}
| baeeq/incubator-spark | core/src/main/scala/spark/network/MessageChunk.scala | Scala | bsd-3-clause | 525 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.web
import com.twitter.finatra.Request
import com.twitter.util.Time
import com.twitter.zipkin.gen
import java.text.SimpleDateFormat
object QueryRequest {
val fmt = new SimpleDateFormat("MM-dd-yyyy HH:mm:ss")
  /**
   * Takes a Finatra `Request` and produces the appropriate `QueryRequest` depending
   * on the GET parameters present.
   *
   * Required parameters:
   * - serviceName: String
   *
   * Optional parameters:
   * - endDatetime: date string matching `fmt` (defaults to the current time)
   * - limit: Int, default 100
   *
   * Mapping (excluding the above parameters):
   * (spanName)                       => Some(SpanQueryRequest)
   * (timeAnnotation)                 => Some(AnnotationQueryRequest)
   * (annotationKey, annotationValue) => Some(KeyValueAnnotationQueryRequest)
   * ()                               => None
   */
def apply(request: Request): Option[QueryRequest] = {
val serviceName = request.params.get("serviceName")
val spanName = request.params.get("spanName")
val timeAnnotation = request.params.get("timeAnnotation")
val annotationKey = request.params.get("annotationKey")
val annotationValue = request.params.get("annotationValue")
val endTimestamp = request.params.get("endDatetime") match {
case Some(str) => {
fmt.parse(str).getTime * 1000
}
case _ => {
Time.now.inMicroseconds
}
}
val limit = request.params.get("limit").map{ _.toInt }.getOrElse(100)
val order = gen.Order.DurationDesc
val spanQueryRequest = for (service <- serviceName; span <- spanName)
yield span match {
case "all" => {
SpanQueryRequest(service, "", endTimestamp, limit, order)
}
case _ => {
SpanQueryRequest(service, span, endTimestamp, limit, order)
}
}
val timeAnnotationQueryRequest = for (service <- serviceName; ann <- timeAnnotation)
yield AnnotationQueryRequest(service, ann, endTimestamp, limit, order)
val keyValueQueryRequest = for (service <- serviceName; key <- annotationKey; value <- annotationValue)
yield KeyValueAnnotationQueryRequest(service, key, value, endTimestamp, limit, order)
spanQueryRequest orElse timeAnnotationQueryRequest orElse keyValueQueryRequest
}
}
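// Illustrative request-to-query mapping (hypothetical parameter values, not from the source):
//   ?serviceName=web&spanName=all
//     => Some(SpanQueryRequest("web", "", now, 100, DurationDesc))
//   ?serviceName=web&annotationKey=http.uri&annotationValue=/home
//     => Some(KeyValueAnnotationQueryRequest("web", "http.uri", "/home", now, 100, DurationDesc))
//   ?spanName=all (no serviceName)
//     => None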
sealed trait QueryRequest
case class ServiceQueryRequest(serviceName: String, endTimestamp: Long, limit: Int, order: gen.Order) extends QueryRequest
case class SpanQueryRequest(serviceName: String, spanName: String, endTimestamp: Long, limit: Int, order: gen.Order) extends QueryRequest
case class AnnotationQueryRequest(serviceName: String, annotation: String, endTimestamp: Long, limit: Int, order: gen.Order) extends QueryRequest
case class KeyValueAnnotationQueryRequest(serviceName: String, key: String, value: String, endTimestamp: Long, limit: Int, order: gen.Order) extends QueryRequest
| davidbernick/zipkin | zipkin-finatra/src/main/scala/com/twitter/zipkin/web/QueryRequest.scala | Scala | apache-2.0 | 3,551 |
package org.locationtech.geomesa.core.filter
import com.typesafe.scalalogging.slf4j.Logging
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.core.filter.TestFilters._
import org.locationtech.geomesa.core.iterators.TestData._
import org.opengis.filter._
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import org.specs2.specification.Fragments
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
class FilterPackageObjectTest extends Specification with Logging {
"The deMorgan function" should {
"change ANDs to ORs" in {
oneLevelAndFilters.flatMap { case (f: And) =>
val dm = deMorgan(f)
dm.isInstanceOf[Or] must beTrue
val dmChildren = dm.asInstanceOf[Or].getChildren
f.getChildren.zip(dmChildren).map {
case (origChild, dmChild) =>
dmChild.isInstanceOf[Not] must beTrue
dmChild.asInstanceOf[Not].getFilter mustEqual origChild
}
}
}
"change ORs to ANDs" in {
oneLevelOrFilters.flatMap { case (f: Or) =>
val dm = deMorgan(f)
dm.isInstanceOf[And] must beTrue
val dmChildren = dm.asInstanceOf[And].getChildren
f.getChildren.zip(dmChildren).map {
case (origChild, dmChild) =>
dmChild.isInstanceOf[Not] must beTrue
dmChild.asInstanceOf[Not].getFilter mustEqual origChild
}
}
}
"remove stacked NOTs" in {
simpleNotFilters.map { case (f: Not) =>
deMorgan(f) mustEqual f.getFilter
}
}
}
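  // De Morgan's laws, as exercised above (schematic notation, for reference):
  //   NOT (A AND B) === (NOT A) OR  (NOT B)
  //   NOT (A OR  B) === (NOT A) AND (NOT B)
  //   NOT (NOT A)   === A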
// Test logicDistribution
"The function 'logicDistribution'" should {
"split a top-level OR into a List of single-element Lists each containing a filter" in {
oneLevelOrFilters.flatMap { or =>
val ll = logicDistribution(or)
ll.map { l => l.size mustEqual 1}
}
}
"split a top-level AND into a a singleton List which contains a List of the ANDed filters" in {
oneLevelAndFilters.map { case (and: And) =>
val ll = logicDistribution(and)
ll.size mustEqual 1
and.getChildren.size mustEqual ll(0).size
}
}
"not return filters with ANDs or ORs explicitly stated" in {
// NB: The nested lists imply ANDs and ORs.
andsOrsFilters.flatMap { filter: Filter =>
val ll = logicDistribution(filter)
ll.flatten.map { l => l.isInstanceOf[BinaryLogicOperator] must beFalse}
}
}
"take a 'simple' filter and return List(List(filter))" in {
baseFilters.map { f =>
val ll = logicDistribution(f)
ll.size mustEqual 1
ll(0).size mustEqual 1
}
}
}
val mediumDataFeatures = mediumData.map(createSF)
// Function defining rewriteFilter Properties.
def testRewriteProps(filter: Filter): Fragments = {
logger.debug(s"Filter: ${ECQL.toCQL(filter)}")
"The function rewriteFilter" should {
val rewrittenFilter: Filter = rewriteFilter(filter)
"return a Filter with at most one OR at the top" in {
val decomp = decomposeBinary(rewrittenFilter)
val orCount = decomp.count(_.isInstanceOf[Or])
orCount mustEqual 0
}
val children = decomposeOr(rewrittenFilter)
"return a Filter where the children of the (optional) OR can (optionally) be an AND" in {
children.map { _.isInstanceOf[Or] must beFalse }
}
"return a Filter where NOTs do not have ANDs or ORs as children" in {
foreachWhen(children) { case f if f.isInstanceOf[Not] => f.isInstanceOf[BinaryLogicOperator] must beFalse }
}
"return a Filter which is 'equivalent' to the original filter" in {
val originalCount = mediumDataFeatures.count(filter.evaluate)
val rewriteCount = mediumDataFeatures.count(rewrittenFilter.evaluate)
logger.debug(s"\\nFilter: ${ECQL.toCQL(filter)}\\nFullData size: ${mediumDataFeatures.size}: filter hits: $originalCount rewrite hits: $rewriteCount")
rewriteCount mustEqual originalCount
}
}
}
oneGeomFilters.map(testRewriteProps)
}
| jwkessi/geomesa | geomesa-core/src/test/scala/org/locationtech/geomesa/core/filter/FilterPackageObjectTest.scala | Scala | apache-2.0 | 4,130 |
package constraints
import java.util.regex.Pattern
import scala.util.matching.Regex
import play.api.data.validation.Constraint
import play.api.data.validation.Constraints._
import play.api.data.validation.{Invalid, Valid, ValidationError}
import helpers.Cache
class FormConstraintsBase(cache: Cache) {
def passwordMinLength: () => Int = cache.config(_.getOptional[Int]("password.min.length").getOrElse(6))
def userNameMinLength: Int = cache.config(_.getOptional[Int]("userName.min.length").getOrElse(6))()
def userNameConstraint: () => Seq[Constraint[String]] =
() => Seq(minLength(userNameMinLength), maxLength(24))
def normalUserNameConstraint: () => Seq[Constraint[String]] = cache.config(
_.getOptional[String]("normalUserNamePattern").map { patStr =>
Seq(pattern(patStr.r, "normalUserNamePatternRule", "normalUserNamePatternError"))
}.getOrElse(
Seq(minLength(userNameMinLength), maxLength(24))
)
)
def passwordMaxLength = 24
val passwordConstraint = List(minLength(passwordMinLength()), maxLength(passwordMaxLength), passwordCharConstraint)
val firstNameConstraint = List(nonEmpty, maxLength(64))
val middleNameConstraint = List(maxLength(64))
val lastNameConstraint = List(nonEmpty, maxLength(64))
val firstNameKanaConstraint = List(nonEmpty, maxLength(64))
val lastNameKanaConstraint = List(nonEmpty, maxLength(64))
val emailConstraint = List(nonEmpty, maxLength(255))
val optionalEmailConstraint = maxLength(255)
val companyNameConstraint = List(nonEmpty, maxLength(32))
def passwordCharConstraint: Constraint[String] = Constraint[String]("constraint.password.char") { s =>
    if (s.forall(c => (0x21 <= c && c < 0x7e))) Valid else Invalid(ValidationError("error.password.char"))
}
  val zip1Pattern = Pattern.compile("\\d{3}")
  val zip2Pattern = Pattern.compile("\\d{4}")
  val telPattern = Pattern.compile("\\d{1,32}")
  val telOptionPattern = Pattern.compile("\\d{0,32}")
}
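// Hedged usage sketch (illustrative only; `cache` would come from DI in the real app):
//   import play.api.data.{Form, Forms}
//   val constraints = new FormConstraintsBase(cache)
//   val password = Forms.text.verifying(constraints.passwordConstraint: _*)
//   val form = Form(Forms.single("password" -> password))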
| ruimo/store2 | app/constraints/FormConstraintsBase.scala | Scala | apache-2.0 | 1,956 |
/*
* Copyright 2014 Treode, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.treode.store
import scala.util.Random
import org.scalatest.FlatSpec
import Cohort.settled
import StoreTestTools._
class ResidentsSpec extends FlatSpec {
private def residents (cohorts: Int*): Residents =
Atlas (cohorts .map (settled (_)) .toArray, 1) .residents (0)
"Residents.all" should "contain all ids" in {
for (id <- Stream.fill (100) (Random.nextInt))
assert (Residents.all.contains (id))
}
"Residents.contains" should "contain only local cohorts" in {
val rs = residents (0, 1, 0, 2)
assert (rs.contains (0))
assert (rs.contains (2))
assert (!rs.contains (1))
assert (!rs.contains (3))
}
"Residents.stability" should "return the percentage of still-hosted cohorts" in {
assertResult (1.0D) ((Residents.all) stability (residents (0)))
assertResult (0.5D) ((Residents.all) stability (residents (0, 1)))
assertResult (0.25D) ((Residents.all) stability (residents (0, 1, 2, 3)))
assertResult (1.0D) ((residents (0)) stability (Residents.all))
assertResult (1.0D) ((residents (0, 1)) stability (Residents.all))
assertResult (1.0D) ((residents (0, 1, 2, 3)) stability (Residents.all))
assertResult (1.0D) ((residents (0, 1)) stability (residents (0, 1)))
assertResult (0.0D) ((residents (0, 1)) stability (residents (2, 1)))
assertResult (1.0D) ((residents (0, 1)) stability (residents (0, 1, 0, 2)))
assertResult (0.5D) ((residents (0, 1)) stability (residents (0, 1, 2, 3)))
assertResult (1.0D) ((residents (1, 2)) stability (residents (0, 1)))
assertResult (1.0D) ((residents (0, 1, 0, 2)) stability (residents (0, 1)))
assertResult (1.0D) ((residents (0, 1, 2, 3)) stability (residents (0, 1)))
assertResult (0.5D) ((residents (0, 1, 0, 3)) stability (residents (0, 1, 2, 3)))
assertResult (0.25D) {
(residents (0, 1, 0, 3, 0, 5, 0, 7)) stability (residents (0, 1, 2, 3, 4, 5, 6, 7))
}}}
| Treode/store | store/test/com/treode/store/ResidentsSpec.scala | Scala | apache-2.0 | 2,530 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.io.{InterruptedIOException, IOException, UncheckedIOException}
import java.nio.channels.ClosedByInterruptException
import java.util.UUID
import java.util.concurrent.{CountDownLatch, ExecutionException, TimeoutException, TimeUnit}
import java.util.concurrent.atomic.AtomicReference
import java.util.concurrent.locks.ReentrantLock
import scala.collection.JavaConverters._
import scala.collection.mutable.{Map => MutableMap}
import scala.util.control.NonFatal
import com.google.common.util.concurrent.UncheckedExecutionException
import org.apache.hadoop.fs.Path
import org.apache.spark.{SparkContext, SparkException}
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.streaming.InternalOutputModes._
import org.apache.spark.sql.connector.catalog.{SupportsWrite, Table}
import org.apache.spark.sql.connector.read.streaming.{Offset => OffsetV2, ReadLimit, SparkDataStream}
import org.apache.spark.sql.connector.write.{LogicalWriteInfoImpl, SupportsTruncate}
import org.apache.spark.sql.connector.write.streaming.StreamingWrite
import org.apache.spark.sql.execution.QueryExecution
import org.apache.spark.sql.execution.command.StreamingExplainCommand
import org.apache.spark.sql.execution.datasources.v2.StreamWriterCommitProgress
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.streaming._
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.util.{Clock, UninterruptibleThread, Utils}
/** States for [[StreamExecution]]'s lifecycle. */
trait State
case object INITIALIZING extends State
case object ACTIVE extends State
case object TERMINATED extends State
case object RECONFIGURING extends State
/**
* Manages the execution of a streaming Spark SQL query that is occurring in a separate thread.
* Unlike a standard query, a streaming query executes repeatedly each time new data arrives at any
* [[Source]] present in the query plan. Whenever new data arrives, a [[QueryExecution]] is created
* and the results are committed transactionally to the given [[Sink]].
*
* @param deleteCheckpointOnStop whether to delete the checkpoint if the query is stopped without
* errors. Checkpoint deletion can be forced with the appropriate
* Spark configuration.
*/
abstract class StreamExecution(
override val sparkSession: SparkSession,
override val name: String,
private val checkpointRoot: String,
analyzedPlan: LogicalPlan,
val sink: Table,
val trigger: Trigger,
val triggerClock: Clock,
val outputMode: OutputMode,
deleteCheckpointOnStop: Boolean)
extends StreamingQuery with ProgressReporter with Logging {
import org.apache.spark.sql.streaming.StreamingQueryListener._
protected val pollingDelayMs: Long = sparkSession.sessionState.conf.streamingPollingDelay
protected val minLogEntriesToMaintain: Int = sparkSession.sessionState.conf.minBatchesToRetain
require(minLogEntriesToMaintain > 0, "minBatchesToRetain has to be positive")
/**
* A lock used to wait/notify when batches complete. Use a fair lock to avoid thread starvation.
*/
protected val awaitProgressLock = new ReentrantLock(true)
protected val awaitProgressLockCondition = awaitProgressLock.newCondition()
private val initializationLatch = new CountDownLatch(1)
private val startLatch = new CountDownLatch(1)
private val terminationLatch = new CountDownLatch(1)
val resolvedCheckpointRoot = {
val checkpointPath = new Path(checkpointRoot)
val fs = checkpointPath.getFileSystem(sparkSession.sessionState.newHadoopConf())
if (sparkSession.conf.get(SQLConf.STREAMING_CHECKPOINT_ESCAPED_PATH_CHECK_ENABLED)
&& StreamExecution.containsSpecialCharsInPath(checkpointPath)) {
// In Spark 2.4 and earlier, the checkpoint path is escaped 3 times (3 `Path.toUri.toString`
// calls). If this legacy checkpoint path exists, we will throw an error to tell the user how
// to migrate.
val legacyCheckpointDir =
new Path(new Path(checkpointPath.toUri.toString).toUri.toString).toUri.toString
val legacyCheckpointDirExists =
try {
fs.exists(new Path(legacyCheckpointDir))
} catch {
case NonFatal(e) =>
// We may not have access to this directory. Don't fail the query if that happens.
logWarning(e.getMessage, e)
false
}
if (legacyCheckpointDirExists) {
throw new SparkException(
s"""Error: we detected a possible problem with the location of your checkpoint and you
|likely need to move it before restarting this query.
|
|Earlier version of Spark incorrectly escaped paths when writing out checkpoints for
|structured streaming. While this was corrected in Spark 3.0, it appears that your
|query was started using an earlier version that incorrectly handled the checkpoint
|path.
|
|Correct Checkpoint Directory: $checkpointPath
|Incorrect Checkpoint Directory: $legacyCheckpointDir
|
|Please move the data from the incorrect directory to the correct one, delete the
|incorrect directory, and then restart this query. If you believe you are receiving
|this message in error, you can disable it with the SQL conf
|${SQLConf.STREAMING_CHECKPOINT_ESCAPED_PATH_CHECK_ENABLED.key}."""
.stripMargin)
}
}
val checkpointDir = checkpointPath.makeQualified(fs.getUri, fs.getWorkingDirectory)
fs.mkdirs(checkpointDir)
checkpointDir.toString
}
logInfo(s"Checkpoint root $checkpointRoot resolved to $resolvedCheckpointRoot.")
def logicalPlan: LogicalPlan
/**
* Tracks how much data we have processed and committed to the sink or state store from each
* input source.
* Only the scheduler thread should modify this field, and only in atomic steps.
* Other threads should make a shallow copy if they are going to access this field more than
* once, since the field's value may change at any time.
*/
@volatile
var committedOffsets = new StreamProgress
/**
* Tracks the offsets that are available to be processed, but have not yet be committed to the
* sink.
* Only the scheduler thread should modify this field, and only in atomic steps.
* Other threads should make a shallow copy if they are going to access this field more than
* once, since the field's value may change at any time.
*/
@volatile
var availableOffsets = new StreamProgress
@volatile
var sinkCommitProgress: Option[StreamWriterCommitProgress] = None
/** The current batchId or -1 if execution has not yet been initialized. */
protected var currentBatchId: Long = -1
/** Metadata associated with the whole query */
protected val streamMetadata: StreamMetadata = {
val metadataPath = new Path(checkpointFile("metadata"))
val hadoopConf = sparkSession.sessionState.newHadoopConf()
StreamMetadata.read(metadataPath, hadoopConf).getOrElse {
val newMetadata = new StreamMetadata(UUID.randomUUID.toString)
StreamMetadata.write(newMetadata, metadataPath, hadoopConf)
newMetadata
}
}
/** Metadata associated with the offset seq of a batch in the query. */
protected var offsetSeqMetadata = OffsetSeqMetadata(
batchWatermarkMs = 0, batchTimestampMs = 0, sparkSession.conf)
/**
* A map of current watermarks, keyed by the position of the watermark operator in the
* physical plan.
*
* This state is 'soft state', which does not affect the correctness and semantics of watermarks
* and is not persisted across query restarts.
* The fault-tolerant watermark state is in offsetSeqMetadata.
*/
protected val watermarkMsMap: MutableMap[Int, Long] = MutableMap()
override val id: UUID = UUID.fromString(streamMetadata.id)
override val runId: UUID = UUID.randomUUID
/**
* Pretty identified string of printing in logs. Format is
* If name is set "queryName [id = xyz, runId = abc]" else "[id = xyz, runId = abc]"
*/
protected val prettyIdString =
Option(name).map(_ + " ").getOrElse("") + s"[id = $id, runId = $runId]"
/**
* A list of unique sources in the query plan. This will be set when generating logical plan.
*/
@volatile protected var uniqueSources: Map[SparkDataStream, ReadLimit] = Map.empty
/** Defines the internal state of execution */
protected val state = new AtomicReference[State](INITIALIZING)
@volatile
var lastExecution: IncrementalExecution = _
/** Holds the most recent input data for each source. */
protected var newData: Map[SparkDataStream, LogicalPlan] = _
@volatile
protected var streamDeathCause: StreamingQueryException = null
/* Get the call site in the caller thread; will pass this into the micro batch thread */
private val callSite = Utils.getCallSite()
/** Used to report metrics to coda-hale. This uses id for easier tracking across restarts. */
lazy val streamMetrics = new MetricsReporter(
this, s"spark.streaming.${Option(name).getOrElse(id)}")
/** Isolated spark session to run the batches with. */
private val sparkSessionForStream = sparkSession.cloneSession()
/**
* The thread that runs the micro-batches of this stream. Note that this thread must be
* [[org.apache.spark.util.UninterruptibleThread]] to workaround KAFKA-1894: interrupting a
* running `KafkaConsumer` may cause endless loop.
*/
val queryExecutionThread: QueryExecutionThread =
new QueryExecutionThread(s"stream execution thread for $prettyIdString") {
override def run(): Unit = {
// To fix call site like "run at <unknown>:0", we bridge the call site from the caller
// thread to this micro batch thread
sparkSession.sparkContext.setCallSite(callSite)
runStream()
}
}
/**
* A write-ahead-log that records the offsets that are present in each batch. In order to ensure
* that a given batch will always consist of the same data, we write to this log *before* any
* processing is done. Thus, the Nth record in this log indicated data that is currently being
* processed and the N-1th entry indicates which offsets have been durably committed to the sink.
*/
val offsetLog = new OffsetSeqLog(sparkSession, checkpointFile("offsets"))
/**
* A log that records the batch ids that have completed. This is used to check if a batch was
* fully processed, and its output was committed to the sink, hence no need to process it again.
* This is used (for instance) during restart, to help identify which batch to run next.
*/
val commitLog = new CommitLog(sparkSession, checkpointFile("commits"))
/** Whether all fields of the query have been initialized */
private def isInitialized: Boolean = state.get != INITIALIZING
/** Whether the query is currently active or not */
override def isActive: Boolean = state.get != TERMINATED
/** Returns the [[StreamingQueryException]] if the query was terminated by an exception. */
override def exception: Option[StreamingQueryException] = Option(streamDeathCause)
/** Returns the path of a file with `name` in the checkpoint directory. */
protected def checkpointFile(name: String): String =
new Path(new Path(resolvedCheckpointRoot), name).toString
/**
* Starts the execution. This returns only after the thread has started and [[QueryStartedEvent]]
* has been posted to all the listeners.
*/
def start(): Unit = {
logInfo(s"Starting $prettyIdString. Use $resolvedCheckpointRoot to store the query checkpoint.")
queryExecutionThread.setDaemon(true)
queryExecutionThread.start()
startLatch.await() // Wait until thread started and QueryStart event has been posted
}
/**
* Run the activated stream until stopped.
*/
protected def runActivatedStream(sparkSessionForStream: SparkSession): Unit
/**
* Activate the stream and then wrap a callout to runActivatedStream, handling start and stop.
*
* Note that this method ensures that [[QueryStartedEvent]] and [[QueryTerminatedEvent]] are
* posted such that listeners are guaranteed to get a start event before a termination.
* Furthermore, this method also ensures that [[QueryStartedEvent]] event is posted before the
* `start()` method returns.
*/
private def runStream(): Unit = {
try {
sparkSession.sparkContext.setJobGroup(runId.toString, getBatchDescriptionString,
interruptOnCancel = true)
sparkSession.sparkContext.setLocalProperty(StreamExecution.QUERY_ID_KEY, id.toString)
if (sparkSession.sessionState.conf.streamingMetricsEnabled) {
sparkSession.sparkContext.env.metricsSystem.registerSource(streamMetrics)
}
// `postEvent` does not throw non fatal exception.
val submissionTime = triggerClock.getTimeMillis()
postEvent(new QueryStartedEvent(id, runId, name, submissionTime))
// Unblock starting thread
startLatch.countDown()
// While active, repeatedly attempt to run batches.
SparkSession.setActiveSession(sparkSession)
updateStatusMessage("Initializing sources")
// force initialization of the logical plan so that the sources can be created
logicalPlan
// Adaptive execution can change num shuffle partitions, disallow
sparkSessionForStream.conf.set(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key, "false")
// Disable cost-based join optimization as we do not want stateful operations to be rearranged
sparkSessionForStream.conf.set(SQLConf.CBO_ENABLED.key, "false")
offsetSeqMetadata = OffsetSeqMetadata(
batchWatermarkMs = 0, batchTimestampMs = 0, sparkSessionForStream.conf)
if (state.compareAndSet(INITIALIZING, ACTIVE)) {
// Unblock `awaitInitialization`
initializationLatch.countDown()
runActivatedStream(sparkSessionForStream)
updateStatusMessage("Stopped")
} else {
// `stop()` is already called. Let `finally` finish the cleanup.
}
} catch {
case e if isInterruptedByStop(e, sparkSession.sparkContext) =>
// interrupted by stop()
updateStatusMessage("Stopped")
case e: IOException if e.getMessage != null
&& e.getMessage.startsWith(classOf[InterruptedException].getName)
&& state.get == TERMINATED =>
// This is a workaround for HADOOP-12074: `Shell.runCommand` converts `InterruptedException`
// to `new IOException(ie.toString())` before Hadoop 2.8.
updateStatusMessage("Stopped")
case e: Throwable =>
streamDeathCause = new StreamingQueryException(
toDebugString(includeLogicalPlan = isInitialized),
s"Query $prettyIdString terminated with exception: ${e.getMessage}",
e,
committedOffsets.toOffsetSeq(sources, offsetSeqMetadata).toString,
availableOffsets.toOffsetSeq(sources, offsetSeqMetadata).toString)
logError(s"Query $prettyIdString terminated with error", e)
updateStatusMessage(s"Terminated with exception: ${e.getMessage}")
// Rethrow the fatal errors to allow the user using `Thread.UncaughtExceptionHandler` to
// handle them
if (!NonFatal(e)) {
throw e
}
} finally queryExecutionThread.runUninterruptibly {
// The whole `finally` block must run inside `runUninterruptibly` to avoid being interrupted
      // when a query is stopped by the user. We need to make sure the following code finishes;
      // otherwise it may throw `InterruptedException` to `UncaughtExceptionHandler` (SPARK-21248).
// Release latches to unblock the user codes since exception can happen in any place and we
// may not get a chance to release them
startLatch.countDown()
initializationLatch.countDown()
try {
stopSources()
state.set(TERMINATED)
currentStatus = status.copy(isTriggerActive = false, isDataAvailable = false)
// Update metrics and status
sparkSession.sparkContext.env.metricsSystem.removeSource(streamMetrics)
// Notify others
sparkSession.streams.notifyQueryTermination(StreamExecution.this)
postEvent(
new QueryTerminatedEvent(id, runId, exception.map(_.cause).map(Utils.exceptionString)))
// Delete the temp checkpoint when either force delete enabled or the query didn't fail
if (deleteCheckpointOnStop &&
(sparkSession.sessionState.conf
.getConf(SQLConf.FORCE_DELETE_TEMP_CHECKPOINT_LOCATION) || exception.isEmpty)) {
val checkpointPath = new Path(resolvedCheckpointRoot)
try {
logInfo(s"Deleting checkpoint $checkpointPath.")
val fs = checkpointPath.getFileSystem(sparkSession.sessionState.newHadoopConf())
fs.delete(checkpointPath, true)
} catch {
case NonFatal(e) =>
// Deleting temp checkpoint folder is best effort, don't throw non fatal exceptions
// when we cannot delete them.
logWarning(s"Cannot delete $checkpointPath", e)
}
}
} finally {
awaitProgressLock.lock()
try {
// Wake up any threads that are waiting for the stream to progress.
awaitProgressLockCondition.signalAll()
} finally {
awaitProgressLock.unlock()
}
terminationLatch.countDown()
}
}
}
private def isInterruptedByStop(e: Throwable, sc: SparkContext): Boolean = {
if (state.get == TERMINATED) {
StreamExecution.isInterruptionException(e, sc)
} else {
false
}
}
override protected def postEvent(event: StreamingQueryListener.Event): Unit = {
sparkSession.streams.postListenerEvent(event)
}
/** Stops all streaming sources safely. */
protected def stopSources(): Unit = {
uniqueSources.foreach { case (source, _) =>
try {
source.stop()
} catch {
case NonFatal(e) =>
logWarning(s"Failed to stop streaming source: $source. Resources may have leaked.", e)
}
}
}
/**
   * Interrupts the query execution thread and awaits its termination until it exceeds the
* timeout. The timeout can be set on "spark.sql.streaming.stopTimeout".
*
* @throws TimeoutException If the thread cannot be stopped within the timeout
*/
@throws[TimeoutException]
protected def interruptAndAwaitExecutionThreadTermination(): Unit = {
val timeout = math.max(
sparkSession.sessionState.conf.getConf(SQLConf.STREAMING_STOP_TIMEOUT), 0)
queryExecutionThread.interrupt()
queryExecutionThread.join(timeout)
if (queryExecutionThread.isAlive) {
val stackTraceException = new SparkException("The stream thread was last executing:")
stackTraceException.setStackTrace(queryExecutionThread.getStackTrace)
val timeoutException = new TimeoutException(
s"Stream Execution thread failed to stop within $timeout milliseconds (specified by " +
s"${SQLConf.STREAMING_STOP_TIMEOUT.key}). See the cause on what was " +
"being executed in the streaming query thread.")
timeoutException.initCause(stackTraceException)
throw timeoutException
}
}
/**
* Blocks the current thread until processing for data from the given `source` has reached at
* least the given `Offset`. This method is intended for use primarily when writing tests.
*/
private[sql] def awaitOffset(sourceIndex: Int, newOffset: OffsetV2, timeoutMs: Long): Unit = {
assertAwaitThread()
def notDone = {
val localCommittedOffsets = committedOffsets
if (sources == null) {
// sources might not be initialized yet
false
} else {
val source = sources(sourceIndex)
!localCommittedOffsets.contains(source) || localCommittedOffsets(source) != newOffset
}
}
while (notDone) {
awaitProgressLock.lock()
try {
awaitProgressLockCondition.await(timeoutMs, TimeUnit.MILLISECONDS)
if (streamDeathCause != null) {
throw streamDeathCause
}
} finally {
awaitProgressLock.unlock()
}
}
logDebug(s"Unblocked at $newOffset for ${sources(sourceIndex)}")
}
/** A flag to indicate that a batch has completed with no new data available. */
@volatile protected var noNewData = false
/**
* Assert that the await APIs should not be called in the stream thread. Otherwise, it may cause
* dead-lock, e.g., calling any await APIs in `StreamingQueryListener.onQueryStarted` will block
* the stream thread forever.
*/
private def assertAwaitThread(): Unit = {
if (queryExecutionThread eq Thread.currentThread) {
throw new IllegalStateException(
"Cannot wait for a query state from the same thread that is running the query")
}
}
/**
* Await until all fields of the query have been initialized.
*/
def awaitInitialization(timeoutMs: Long): Unit = {
assertAwaitThread()
require(timeoutMs > 0, "Timeout has to be positive")
if (streamDeathCause != null) {
throw streamDeathCause
}
initializationLatch.await(timeoutMs, TimeUnit.MILLISECONDS)
if (streamDeathCause != null) {
throw streamDeathCause
}
}
override def processAllAvailable(): Unit = {
assertAwaitThread()
if (streamDeathCause != null) {
throw streamDeathCause
}
if (!isActive) return
awaitProgressLock.lock()
try {
noNewData = false
while (true) {
awaitProgressLockCondition.await(10000, TimeUnit.MILLISECONDS)
if (streamDeathCause != null) {
throw streamDeathCause
}
if (noNewData || !isActive) {
return
}
}
} finally {
awaitProgressLock.unlock()
}
}
override def awaitTermination(): Unit = {
assertAwaitThread()
terminationLatch.await()
if (streamDeathCause != null) {
throw streamDeathCause
}
}
override def awaitTermination(timeoutMs: Long): Boolean = {
assertAwaitThread()
require(timeoutMs > 0, "Timeout has to be positive")
terminationLatch.await(timeoutMs, TimeUnit.MILLISECONDS)
if (streamDeathCause != null) {
throw streamDeathCause
} else {
!isActive
}
}
/** Expose for tests */
def explainInternal(extended: Boolean): String = {
if (lastExecution == null) {
"No physical plan. Waiting for data."
} else {
val explain = StreamingExplainCommand(lastExecution, extended = extended)
sparkSession.sessionState.executePlan(explain).executedPlan.executeCollect()
.map(_.getString(0)).mkString("\\n")
}
}
override def explain(extended: Boolean): Unit = {
// scalastyle:off println
println(explainInternal(extended))
// scalastyle:on println
}
override def explain(): Unit = explain(extended = false)
override def toString: String = {
s"Streaming Query $prettyIdString [state = $state]"
}
private def toDebugString(includeLogicalPlan: Boolean): String = {
val debugString =
s"""|=== Streaming Query ===
|Identifier: $prettyIdString
|Current Committed Offsets: $committedOffsets
|Current Available Offsets: $availableOffsets
|
|Current State: $state
|Thread State: ${queryExecutionThread.getState}""".stripMargin
if (includeLogicalPlan) {
debugString + s"\\n\\nLogical Plan:\\n$logicalPlan"
} else {
debugString
}
}
protected def getBatchDescriptionString: String = {
val batchDescription = if (currentBatchId < 0) "init" else currentBatchId.toString
s"""|${Option(name).getOrElse("")}
|id = $id
|runId = $runId
|batch = $batchDescription""".stripMargin
}
protected def createStreamingWrite(
table: SupportsWrite,
options: Map[String, String],
inputPlan: LogicalPlan): StreamingWrite = {
val info = LogicalWriteInfoImpl(
queryId = id.toString,
inputPlan.schema,
new CaseInsensitiveStringMap(options.asJava))
val writeBuilder = table.newWriteBuilder(info)
outputMode match {
case Append =>
writeBuilder.buildForStreaming()
case Complete =>
// TODO: we should do this check earlier when we have capability API.
require(writeBuilder.isInstanceOf[SupportsTruncate],
table.name + " does not support Complete mode.")
writeBuilder.asInstanceOf[SupportsTruncate].truncate().buildForStreaming()
case Update =>
        // Although no v2 sinks really support Update mode now, during tests we do want them
        // to pretend to support Update mode, and treat Update mode the same as Append mode.
if (Utils.isTesting) {
writeBuilder.buildForStreaming()
} else {
throw new IllegalArgumentException(
"Data source v2 streaming sinks does not support Update mode.")
}
}
}
protected def purge(threshold: Long): Unit = {
logDebug(s"Purging metadata at threshold=$threshold")
offsetLog.purge(threshold)
commitLog.purge(threshold)
}
}
object StreamExecution {
val QUERY_ID_KEY = "sql.streaming.queryId"
val IS_CONTINUOUS_PROCESSING = "__is_continuous_processing"
def isInterruptionException(e: Throwable, sc: SparkContext): Boolean = e match {
// InterruptedIOException - thrown when an I/O operation is interrupted
// ClosedByInterruptException - thrown when an I/O operation upon a channel is interrupted
case _: InterruptedException | _: InterruptedIOException | _: ClosedByInterruptException =>
true
// The cause of the following exceptions may be one of the above exceptions:
//
// UncheckedIOException - thrown by codes that cannot throw a checked IOException, such as
// BiFunction.apply
// ExecutionException - thrown by codes running in a thread pool and these codes throw an
// exception
// UncheckedExecutionException - thrown by codes that cannot throw a checked
// ExecutionException, such as BiFunction.apply
case e2 @ (_: UncheckedIOException | _: ExecutionException | _: UncheckedExecutionException)
if e2.getCause != null =>
isInterruptionException(e2.getCause, sc)
case se: SparkException =>
val jobGroup = sc.getLocalProperty("spark.jobGroup.id")
if (jobGroup == null) return false
val errorMsg = se.getMessage
if (errorMsg.contains("cancelled") && errorMsg.contains(jobGroup) && se.getCause == null) {
true
} else if (se.getCause != null) {
isInterruptionException(se.getCause, sc)
} else {
false
}
case _ =>
false
}
/** Whether the path contains special chars that will be escaped when converting to a `URI`. */
def containsSpecialCharsInPath(path: Path): Boolean = {
path.toUri.getPath != new Path(path.toUri.toString).toUri.getPath
}
}
/**
 * A special thread to run the stream query. Some code must run in the QueryExecutionThread
 * and will use `classOf[QueryExecutionThread]` to check.
*/
abstract class QueryExecutionThread(name: String) extends UninterruptibleThread(name)
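// Illustrative user-level entry point that ultimately drives a StreamExecution (hedged,
// not part of this file; `df` is a hypothetical streaming DataFrame):
//   val query = df.writeStream
//     .outputMode("append")
//     .option("checkpointLocation", "/tmp/ckpt")  // becomes resolvedCheckpointRoot
//     .format("console")
//     .start()                                    // spawns the query execution thread
//   query.awaitTermination()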
| darionyaphet/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala | Scala | apache-2.0 | 28,359 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.nio.ByteBuffer
import java.util.concurrent.{ExecutorService, RejectedExecutionException}
import scala.language.existentials
import scala.util.control.NonFatal
import org.apache.spark._
import org.apache.spark.TaskState.TaskState
import org.apache.spark.internal.Logging
import org.apache.spark.serializer.SerializerInstance
import org.apache.spark.util.{LongAccumulator, ThreadUtils, Utils}
/**
* Runs a thread pool that deserializes and remotely fetches (if necessary) task results.
*/
private[spark] class TaskResultGetter(sparkEnv: SparkEnv, scheduler: TaskSchedulerImpl)
extends Logging {
private val THREADS = sparkEnv.conf.getInt("spark.resultGetter.threads", 4)
// Exposed for testing.
protected val getTaskResultExecutor: ExecutorService =
ThreadUtils.newDaemonFixedThreadPool(THREADS, "task-result-getter")
// Exposed for testing.
protected val serializer = new ThreadLocal[SerializerInstance] {
override def initialValue(): SerializerInstance = {
sparkEnv.closureSerializer.newInstance()
}
}
protected val taskResultSerializer = new ThreadLocal[SerializerInstance] {
override def initialValue(): SerializerInstance = {
sparkEnv.serializer.newInstance()
}
}
def enqueueSuccessfulTask(
taskSetManager: TaskSetManager,
tid: Long,
serializedData: ByteBuffer): Unit = {
getTaskResultExecutor.execute(new Runnable {
override def run(): Unit = Utils.logUncaughtExceptions {
try {
val resultSerializer = taskResultSerializer.get()
val (result, size) = resultSerializer.deserialize[TaskResult[_]](serializedData) match {
case directResult: DirectTaskResult[_] =>
if (!taskSetManager.canFetchMoreResults(serializedData.limit())) {
return
}
// deserialize "value" without holding any lock so that it won't block other threads.
// We should call it here, so that when it's called again in
// "TaskSetManager.handleSuccessfulTask", it does not need to deserialize the value.
directResult.value()
(directResult, serializedData.limit())
case IndirectTaskResult(blockId, size) =>
if (!taskSetManager.canFetchMoreResults(size)) {
// dropped by executor if size is larger than maxResultSize
sparkEnv.blockManager.master.removeBlock(blockId)
return
}
logDebug("Fetching indirect task result for TID %s".format(tid))
scheduler.handleTaskGettingResult(taskSetManager, tid)
val serializedTaskResult = sparkEnv.blockManager.getRemoteBytes(blockId)
if (!serializedTaskResult.isDefined) {
/* We won't be able to get the task result if the machine that ran the task failed
* between when the task ended and when we tried to fetch the result, or if the
* block manager had to flush the result. */
scheduler.handleFailedTask(
taskSetManager, tid, TaskState.FINISHED, TaskResultLost)
return
}
val deserializedResult = resultSerializer.deserialize[DirectTaskResult[_]](
serializedTaskResult.get.toByteBuffer)
// force deserialization of referenced value
deserializedResult.value()
sparkEnv.blockManager.master.removeBlock(blockId)
(deserializedResult, size)
}
// Set the task result size in the accumulator updates received from the executors.
// We need to do this here on the driver because if we did this on the executors then
// we would have to serialize the result again after updating the size.
result.accumUpdates = result.accumUpdates.map { a =>
if (a.name == Some(InternalAccumulator.RESULT_SIZE)) {
val acc = a.asInstanceOf[LongAccumulator]
assert(acc.sum == 0L, "task result size should not have been set on the executors")
acc.setValue(size.toLong)
acc
} else {
a
}
}
scheduler.handleSuccessfulTask(taskSetManager, tid, result)
} catch {
case cnf: ClassNotFoundException =>
val loader = Thread.currentThread.getContextClassLoader
taskSetManager.abort("ClassNotFound with classloader: " + loader)
// Matching NonFatal so we don't catch the ControlThrowable from the "return" above.
case NonFatal(ex) =>
logError("Exception while getting task result", ex)
taskSetManager.abort("Exception while getting task result: %s".format(ex))
}
}
})
}
def enqueueFailedTask(taskSetManager: TaskSetManager, tid: Long, taskState: TaskState,
serializedData: ByteBuffer) {
var reason : TaskFailedReason = UnknownReason
try {
getTaskResultExecutor.execute(new Runnable {
override def run(): Unit = Utils.logUncaughtExceptions {
val loader = Utils.getContextOrSparkClassLoader
try {
if (serializedData != null && serializedData.limit() > 0) {
reason = serializer.get().deserialize[TaskFailedReason](
serializedData, loader)
}
} catch {
case cnd: ClassNotFoundException =>
// Log an error but keep going here -- the task failed, so not catastrophic
// if we can't deserialize the reason.
logError(
"Could not deserialize TaskEndReason: ClassNotFound with classloader " + loader)
case ex: Exception => // No-op
}
scheduler.handleFailedTask(taskSetManager, tid, taskState, reason)
}
})
} catch {
case e: RejectedExecutionException if sparkEnv.isStopped =>
// ignore it
}
}
def stop() {
getTaskResultExecutor.shutdownNow()
}
}
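// Result-path summary (descriptive comment, inferred from enqueueSuccessfulTask above):
//   DirectTaskResult   -> bytes arrived inline: deserialize, record result size, hand off.
//   IndirectTaskResult -> only a blockId arrived: fetch remote bytes from the block manager,
//                         deserialize, remove the block, then hand off to the scheduler.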
| big-pegasus/spark | core/src/main/scala/org/apache/spark/scheduler/TaskResultGetter.scala | Scala | apache-2.0 | 6,880 |
object Test {
def foo () {
val x = if ("abc" != "def") 1 else 0
}
}
}
| JetBrains/intellij-scala | scala/scala-impl/testdata/uast/IfStatement.scala | Scala | apache-2.0 | 76 |
package com.twitter.scalding
import scala.annotation.tailrec
import cascading.pipe._
import org.specs._
/*
 * Zip uses a side-effect construct to create a zipped list of consecutive lines.
*/
class Zip(args : Args) extends Job(args) {
//import RichPipe._
def createState = new {
var lastLine: String = null
def release() {}
}
val zipped = Tsv("line",('line)).pipe
.using { createState }
.flatMap[String, (String, String)] ('line -> ('l1, 'l2)) { case (accu, line) =>
if (accu.lastLine == null) {
accu.lastLine = line
List()
} else {
val zipped = List((accu.lastLine, line))
accu.lastLine = line
zipped
}
}
.project('l1, 'l2)
zipped.write(Tsv("zipped"))
}
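// What "zipping" means here, in plain Scala for intuition (hedged sketch, not part of the job):
//   List("line1", "line2", "line3", "line4").sliding(2).map { case Seq(a, b) => (a, b) }.toList
//   // => List((line1,line2), (line2,line3), (line3,line4))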
class SideEffectTest extends Specification with TupleConversions with FieldConversions {
"Zipper should do create zipped sequence. Coded with side effect" should {
JobTest("com.twitter.scalding.Zip")
.source(Tsv("line",('line)), List(Tuple1("line1"), Tuple1("line2"), Tuple1("line3"), Tuple1("line4")))
.sink[(String, String)](Tsv("zipped")) { ob =>
"correctly compute zipped sequence" in {
val res = ob.toList
val expected = List(("line1", "line2"), ("line2", "line3"), ("line3", "line4"))
res.zip(expected) foreach {
case ((a, b), (c, d)) =>
a must be_== ( c )
b must be_== ( d )
}
}
}
.run
.finish
}
}
/*
* ZipBuffer uses (unneccessary) side effect to construct zipped.
*/
class ZipBuffer(args : Args) extends Job(args) {
//import RichPipe._
def createState = new {
var lastLine: String = null
def release() {}
}
val zipped = Tsv("line",('line)).pipe
.map('line -> 'oddOrEven) { line : String => line.substring(line.length-1).toInt % 2 match {
case 0 => "even"
case 1 => "odd"
}}
.groupBy('oddOrEven) {
_.using { createState }
.mapStream('line -> ('l1, 'l2)) { (accu, iter : Iterator[String]) => {
accu.lastLine = iter.next()
for (line <- iter) yield {
val result = (accu.lastLine, line)
accu.lastLine = line
result
}
}}
}
.project('l1, 'l2)
zipped.write(Tsv("zipped"))
}
class SideEffectBufferTest extends Specification with TupleConversions with FieldConversions {
"ZipBuffer should do create two zipped sequences, one for even lines and one for odd lines. Coded with side effect" should {
JobTest("com.twitter.scalding.ZipBuffer")
.source(Tsv("line",('line)), List(Tuple1("line1"), Tuple1("line2"), Tuple1("line3"), Tuple1("line4"), Tuple1("line5"), Tuple1("line6")))
.sink[(String, String)](Tsv("zipped")) { ob =>
"correctly compute zipped sequence" in {
val res = ob.toList.sorted
val expected = List(("line1", "line3"), ("line3", "line5"), ("line2", "line4"), ("line4", "line6")).sorted
res.zip(expected) foreach {
case ((a, b), (c, d)) =>
a must be_== ( c )
b must be_== ( d )
}
}
}
.run
.finish
}
}
| stripe/scalding | scalding-core/src/test/scala/com/twitter/scalding/SideEffectTest.scala | Scala | apache-2.0 | 3,195 |
package uk.co.sprily
package dh
package harvester
import java.util.concurrent.ScheduledExecutorService
import scala.concurrent.duration._
import com.github.nscala_time.time.Imports.DateTime
import com.typesafe.scalalogging.LazyLogging
import scalaz.concurrent._
import scalaz.stream._
import uk.co.sprily.dh.scheduling._
trait RequestLike {
type Device <: DeviceLike
type Selection = Device#AddressSpace
val device: Device
val selection: Selection
}
trait ResponseLike {
type Device <: DeviceLike
type Measurement = Device#Measurement
val timestamp: DateTime
val device: Device
val measurement: Measurement
}
trait RequestHandler extends LazyLogging {
type Request <: RequestLike
type Response <: ResponseLike
// Some handlers are going to require life-cycle management, eg. setting
// up TCP connection pools.
def startup(): Unit = { }
def shutdown(): Unit = { }
def apply(request: Request): Task[Response]
final def responses(requests: Process[Task,Request],
interval: FiniteDuration): Process[Task, (Request,Response)] = {
implicit val Strat = Strategy.DefaultStrategy
implicit val Sched = Strategy.DefaultTimeoutScheduler
val requestRate = rate(Schedule.each(interval))
val responses = requests.flatMap(req => Process.eval(apply(req)).map(res => (req,res)))
retryEvery(Schedule.each(5.seconds))(requestRate zip responses) map (_._2)
}
final def recurring(request: Request, interval: FiniteDuration): Process[Task, Response] = {
responses(Process.constant(request), interval).map(_._2)
}
private def rate(s: Schedule): Process[Task, TargetLike] = {
implicit val Strat = Strategy.DefaultStrategy
implicit val Sched = Strategy.DefaultTimeoutScheduler
def next(t: s.Target) = Process.await(Task.delay(s.completed(t))) { t => go(t) }
def go(target: Option[s.Target]): Process[Task, s.Target] = {
target match {
case None => Process.halt
case Some(t) => sleep(t) ++ Process.emit(t) ++ next(t)
}
}
val t0 = Task.delay(Some(s.start()))
Process.await(t0) { t => go(t) }
}
private def sleep(t: TargetLike)(implicit es: ScheduledExecutorService) = {
t.initialDelay() match {
case Duration.Zero => Process.empty
case d => time.sleep(d)
}
}
private def retryEvery[A](schedule: Schedule)
(p: Process[Task,A])
(implicit s: ScheduledExecutorService): Process[Task,A] = {
val logErrors = channel.lift[Task,Throwable,Unit](t => Task { logger.warn(s"Request error: $t") })
rate(schedule).flatMap { _ =>
p.attempt().observeW(logErrors).stripW
}
}
}
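// Hedged usage sketch (a concrete handler and request value are assumed, not defined here):
//   handler.startup()
//   val polls = handler.recurring(someRequest, 5.seconds)  // Process[Task, handler.Response]
//   polls.take(3).runLog.run                               // block for three polled responses
//   handler.shutdown()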
// Mixed in to package object
trait HandlerTypes {
type Dispatch = PartialFunction[RequestLike, Task[ResponseLike]]
}
| sprily/datahopper | harvester/src/main/scala/handler.scala | Scala | gpl-3.0 | 2,839 |
/**
*
* HttpClientTest
* Ledger wallet
*
* Created by Pierre Pollastri on 30/06/15.
*
* The MIT License (MIT)
*
* Copyright (c) 2015 Ledger
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
package co.ledger.wallet.core.net
import java.util.concurrent.CountDownLatch
import android.net.Uri
import android.test.InstrumentationTestCase
import co.ledger.wallet.core.utils.logs.Logger
import junit.framework.Assert
import org.json.JSONObject
import scala.collection.JavaConversions._
import scala.util.{Failure, Success}
import scala.concurrent.ExecutionContext.Implicits.global
class HttpClientTest extends InstrumentationTestCase {
var client: HttpClient = _
var signal: CountDownLatch = _
override def setUp(): Unit = {
super.setUp()
client = new HttpClient(Uri.parse("http://httpbin.org"))
signal = new CountDownLatch(1)
}
def testGet(): Unit = {
client
.get("get")
.param("Toto" -> 12)
.json.onComplete {
case Success((json, response)) =>
assert("http://httpbin.org/get?Toto=12" == json.get("url"))
assert(12 == json.getJSONObject("args").getInt("Toto"))
signal.countDown()
case Failure(ex) =>
ex.printStackTrace()
}
signal.await()
}
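  // Note: httpbin.org/get echoes the request back as JSON, with the full URL under "url" and
  // the query parameters under "args", which is what the assertions above rely on.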
def testPost(): Unit = {
val json = new JSONObject()
json.put("a_param", "a_value")
json.put("another_param", 42)
client
.post("post")
.body(json)
.json.onComplete {
case Success((result, response)) =>
val data = new JSONObject(result.getString("data"))
for (key <- json.keys()) {
assert(json.get(key) == data.get(key))
}
signal.countDown()
case Failure(ex) =>
ex.printStackTrace()
}
signal.await()
}
}
| LedgerHQ/ledger-wallet-android | app/src/androidTest/scala/co/ledger/wallet/core/net/HttpClientTest.scala | Scala | mit | 2,821 |
// /////////////////////////////////////////// //
// Fureteur - https://github.com/gip/fureteur //
// /////////////////////////////////////////// //
import java.io.File
import org.apache.commons.io.FileUtils
import org.json4s.ShortTypeHints
import org.json4s.jackson.Serialization
case class HTMLContent(url:String, html:String)
// Taking URLs in batches from a file
class fileBatchPrefetcher(config: Config, control: Control)
extends genericBatchProducer[Data](config.getInt("batch_size"),
config.getInt("threshold_in_batches"),
config.getLongOption("timeout_ms"), control) {
val file = config("file_name")
log.info("Opening " + file)
val data = scala.io.Source.fromFile(file).getLines().toArray
var index = 0
var batch = 0
override def getBatch(sz: Int): Option[List[Data]] = {
    if (index >= data.size) {
return None
}
index += sz
batch += 1
val listURL = data.slice(index - sz, index).toList
log.info("Fetched " + listURL.length.toString + " entrie(s) from " + file)
val d = Data.empty()
Some(listURL map (e => d ++ List(("fetch_url", e), ("batch", batch.toString))))
}
}
class fileBatchWriteback(config: Config, control: Control) extends genericBatchReseller[Data](control) {
//val log = Logging(context.system, this)
val fileName = config("file_name")
def resell(batch: List[Data]) = {
log.info("Writing " + batch.length.toString + " entrie(s) to " + fileName)
def doit(b: List[Data]): Unit = {
b match {
case x :: xs => {
// get just important information, fetch_url and fetch_data
// then convert them to a json string
implicit val formats = Serialization.formats(
ShortTypeHints(
List(
classOf[HTMLContent]
)
)
)
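          // Editor's note: with the ShortTypeHints above, each record serializes
          // roughly as { "jsonClass": "HTMLContent", "url": "...", "html": "..." }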
          val content = new HTMLContent(x("fetch_url"), x("fetch_data"))
          val json = Serialization.writePretty(content)
          // Append rather than overwrite so earlier entries in the batch survive
          // (this append overload requires commons-io 2.1+)
          FileUtils.writeStringToFile(new File(fileName), json + "\n", "UTF-8", true)
          doit(xs)
}
case Nil =>
}
}
doit(batch)
}
}
| njosephef/fetcher | src/main/scala/fileio.scala | Scala | mit | 2227
package testkit
import java.net.URI
import io.netty.channel.socket.SocketChannel
import io.netty.bootstrap.Bootstrap
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel._
import io.netty.channel.socket.nio.NioSocketChannel
import io.netty.handler.codec.http.{DefaultHttpHeaders, HttpClientCodec, HttpObjectAggregator}
import io.netty.handler.codec.http.websocketx.{TextWebSocketFrame, WebSocketClientHandshakerFactory, WebSocketFrame, WebSocketVersion}
import io.netty.handler.codec.http.websocketx.extensions.compression.WebSocketClientCompressionHandler
object run {
def main(args: Array[String]): Unit = {
val uri = new URI("ws://188.166.20.21:8080")
val handler = new WebSocketClientHandler(WebSocketClientHandshakerFactory.newHandshaker(uri, WebSocketVersion.V00, null, true, (new DefaultHttpHeaders).add("origin", "127.0.0.1")))
val group: EventLoopGroup = new NioEventLoopGroup
val b: Bootstrap = new Bootstrap
b.group(group).channel(classOf[NioSocketChannel]).handler(new ChannelInitializer[SocketChannel]() {
protected def initChannel(ch: SocketChannel) {
val p: ChannelPipeline = ch.pipeline
p.addLast(new HttpClientCodec, new HttpObjectAggregator(8192), WebSocketClientCompressionHandler.INSTANCE, handler)
}
})
val ch: Channel = b.connect(uri.getHost, 8080).sync.channel
handler.handshakeFuture.sync
val frame: WebSocketFrame = new TextWebSocketFrame("LARGE")
ch.writeAndFlush(frame)
}
}
| wsbench/wsbench | testkit/src/main/scala/run.scala | Scala | mit | 1,535 |
package com.ing.bakery.interaction.spring
import com.ing.baker.recipe.javadsl.Interaction
class OutputEvent(outputIngredient: String)
class TestInteraction(append: String) extends Interaction {
def apply(input: String): OutputEvent = {
val output = append + ":" + input
println(output)
new OutputEvent(output)
}
}
class TestInteraction2(append: String) extends Interaction {
def apply(input: String): OutputEvent = {
val output = append + ":" + input
println(output)
new OutputEvent(output)
}
}
class TestInteraction3(append: String) extends Interaction {
def apply(input: String): OutputEvent = {
val output = append + ":" + input
println(output)
new OutputEvent(output)
}
}
| ing-bank/baker | bakery/interaction-spring/src/test/scala/com/ing/bakery/interaction/spring/TestInteraction.scala | Scala | mit | 728
package org.skycastle.server
/**
*
*/
trait Avatar {
/*
/**
   * Do some action available to the avatar, such as moving around, controlling wielded equipment or vehicles,
* or changing settings for controlled guild.
*/
def doAction(name: Symbol, parameters: Data)
/**
* All things that are perceived by this avatar, both internal state like hitpoints and attributes,
* as well as outside entities like characters, items and terrain, and also 'known' things like guild properties.
*/
def perceptions(): Data
*/
}
| zzorn/skycastle | src/main/scala/org/skycastle/server/Avatar.scala | Scala | gpl-2.0 | 544 |
package ch.thmx.arpibot.tests
import org.scalatest._
import javax.jws.{WebMethod, WebService}
import javax.xml.ws.Endpoint
import javax.xml.ws.Service
@WebService
trait SoapTrait {
@WebMethod
def test(value: String): String
}
@WebService(endpointInterface = "ch.thmx.arpibot.tests.SoapTrait")
private class MinimalSoapServer extends SoapTrait {
def test(value: String) = "Hi " + value
}
import java.net.URL
import javax.xml.namespace.QName
class WebServiceTest extends FlatSpec {
it should "launch the WebService and reach it" in {
val wsURL = "http://localhost:8080/wstest"
val endpoint = Endpoint.publish(wsURL, new MinimalSoapServer())
System.out.println("WebService launched... Waiting for requests...")
val url = new URL(wsURL + "?wsdl")
val qname = new QName("http://tests.arpibot.thmx.ch/", "MinimalSoapServerService")
val service = Service.create(url, qname)
val port = service.getPort(classOf[SoapTrait])
println(port.test("salut"))
}
}
| ThmX/ARPi-Bot | RasPi/src/test/scala/ch/thmx/arpibot/tests/WebServiceTest.scala | Scala | mit | 1004
package org.jetbrains.plugins.scala
package codeInspection
package collections
import com.intellij.codeInspection.ProblemsHolder
import com.intellij.psi.PsiElement
import scala.collection.immutable.ArraySeq
/**
* @author Nikolay.Tropin
*/
class SideEffectsInMonadicTransformationInspection extends OperationOnCollectionInspection {
override def actionFor(implicit holder: ProblemsHolder, isOnTheFly: Boolean): PartialFunction[PsiElement, Any] = {
case qual`.monadicMethod`(arg) =>
exprsWithSideEffects(arg).foreach(
expr => holder.registerProblem(expr, ScalaInspectionBundle.message("side.effects.in.monadic"), highlightType)
)
}
override def possibleSimplificationTypes: ArraySeq[SimplificationType] = ArraySeq.empty
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/codeInspection/collections/SideEffectsInMonadicTransformationInspection.scala | Scala | apache-2.0 | 758 |
package oil
import _root_.views.html.b3.B3FieldConstructor
import play.api.i18n.Lang
import play.twirl.api.Html
trait Field[T] {
def constraints: Seq[Constraint[T]]
def data: Option[String]
def formatter: Formatter[T]
def inputProvider: InputProvider[T]
def hasErrors: Boolean
/**
* Returns the list of errors associated with this field.
* The list will be empty if this field has no errors.
* @return the list of errors.
*/
def errors: Seq[FormError]
def isValid: Boolean
/**
* Returns this field value if `data` was successfully formatted and all `constraints` are valid.
*/
def value: Option[T]
def render(fieldName: String, args: (Symbol,Any)*)(implicit handler: B3FieldConstructor, lang: Lang): Html = {
inputProvider.render(fieldName)(handler, lang)
}
/**
* Returns a copy of this Field but with the given `data`.
* @param data the new data.
*/
def withData(data: Option[String]): Field[T]
/**
* Returns a copy of this Field but with the given `value`.
*
* Note that calling `.value` on the returned field might not return `Some(value)`, since `value`
* may not obey all the constraints.
* @param value the value.
* @return the new Field.
*/
def withValue(value: T): Field[T]
/**
* Returns a copy of this Field but with the given `Formatter`.
* @param newFormatter the new Formatter.
*/
def withFormatter(newFormatter: Formatter[T]): Field[T]
/**
* Returns a copy of this Field but with the given `InputProvider`.
* @param newInputProvider the new InputProvider.
*/
def withInputProvider(newInputProvider: InputProvider[T]): Field[T]
/**
* Returns a copy of this Field but with an HiddenInputProvider.
* @return the new field.
*/
def hidden: Field[T] = withInputProvider(InputProviders.hiddenProvider(this))
/**
* Returns a copy of this Field with the new constraints added.
* @param constraints the constraints to add.
*/
def verifying(constraints: Constraint[T]*): Field[T]
/**
* @return the list of all distinct constraints of this field including the subconstraints of the constraints.
*/
val allDistinctConstraints: Seq[Constraint[T]] = constraints.flatMap(_.allSubConstraints).distinct
}
case class RequiredField[T](constraints: Seq[Constraint[T]] = Seq(Constraints.requiredAny), data: Option[String] = None)
(implicit val formatter: Formatter[T], val inputProviderCreator: Field[T] => InputProvider[T]) extends Field[T] {
/**
* _value will be:
* · None - if a None was received in `data`.
* · Some[Either[Seq[FormError], T]] - otherwise
* · Right - if `data` was successfully formatted and all `constraints` are valid.
* . Left - otherwise
*/
private val _value: Option[Either[Seq[FormError], T]] = data.map{ data =>
formatter.toType(data).fold(
error => Left(Seq(error)),
t => {
val constraintErrors: Seq[FormError] = constraints.map(_.validate(t)).collect {
case Invalid(error) => error
}
Either.cond(constraintErrors.isEmpty, t, constraintErrors)
}
)
}
def hasErrors: Boolean = _value.exists(_.isLeft)
def errors: Seq[FormError] = _value.flatMap(_.left.toOption).getOrElse(Seq())
  def isValid: Boolean = !hasErrors
def value: Option[T] = _value.flatMap(_.right.toOption)
val inputProvider: InputProvider[T] = inputProviderCreator(this)
def optional: Field[Option[T]] = new OptionalField[T](this)
def withData(data: Option[String]): Field[T] = this.copy(data = data)
def withValue(value: T): Field[T] = this.copy(data = Some(formatter.toString(value)))
def withFormatter(newFormatter: Formatter[T]): Field[T] = this.copy()(newFormatter, inputProviderCreator)
def withInputProvider(newInputProvider: InputProvider[T]): Field[T] = this.copy()(formatter, (f: Field[T]) => newInputProvider.withField(f))
def verifying(constraints: Constraint[T]*): Field[T] = this.copy(constraints = this.constraints ++ constraints)
}
case class OptionalField[T](innerField: Field[T]) extends Field[Option[T]] {
lazy val constraints: Seq[Constraint[Option[T]]] = innerField.constraints.map(c => Constraint.toOptionalConstraint(c))
def data: Option[String] = innerField.data
lazy val formatter: Formatter[Option[T]] = Formatter.toOptionalFormatter(innerField.formatter)
lazy val inputProvider: InputProvider[Option[T]] = InputProvider.toOptionalInputProvider(innerField.inputProvider)
def hasErrors: Boolean = innerField.hasErrors
def errors: Seq[FormError] = innerField.errors
def isValid: Boolean = innerField.isValid
def value: Option[Option[T]] = Some(innerField.value)
def withData(data: Option[String]): Field[Option[T]] = this.copy(innerField.withData(data))
def withValue(value: Option[T]): Field[Option[T]] = value.fold(this)(v => this.copy(innerField.withValue(v)))
def withFormatter(newFormatter: Formatter[Option[T]]): Field[Option[T]] = {
this.copy(innerField.withFormatter(Formatter.toFormatter(newFormatter)))
}
def withInputProvider(newInputProvider: InputProvider[Option[T]]): Field[Option[T]] = {
this.copy(innerField.withInputProvider(InputProvider.toInputProvider(newInputProvider)))
}
def verifying(constraints: Constraint[Option[T]]*): Field[Option[T]] = {
this.copy(innerField.verifying(constraints.map(c => Constraint.toConstraint(c)):_*))
}
}
object Fields {
import Constraints._
import Formats._
import InputProviders._
implicit val empty = new RequiredField[Nothing]()(emptyFormat, emptyProvider)
implicit val text = new RequiredField[String]()(stringFormat, textProvider)
implicit val number = new RequiredField[Int]()(intFormat, intProvider)
//TODO: add more fields
}
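// Editor's usage sketch (not part of the original file), using only the
// fields defined above:
//
//   val age = Fields.number.withData(Some("42"))
//   age.value // Some(42) once formatting and every constraint succeeds
//
//   val nickname = Fields.text.optional.withData(None)
//   nickname.value // Some(None): an optional field binds missing data to None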
| Lasering/Oil | project-code/app/oil/Fields.scala | Scala | apache-2.0 | 5,757 |
package com.atomist.rug.kind.properties
import com.atomist.graph.GraphNode
import com.atomist.rug.kind.core.{DirectoryMutableView, FileMutableView, ProjectMutableView}
import com.atomist.rug.spi.{MutableView, ReflectivelyTypedType, Type}
class PropertiesType
extends Type
with ReflectivelyTypedType {
override def description = "Java properties file"
override def runtimeClass: Class[PropertiesMutableView] = classOf[PropertiesMutableView]
override def findAllIn(context: GraphNode): Option[Seq[MutableView[_]]] = {
context match {
case fmv: FileMutableView =>
Some(Seq(fmv.originalBackingObject)
.filter(f => f.name.endsWith(".properties"))
.map(f => new PropertiesMutableView(f, fmv.parent)))
case dmv: DirectoryMutableView =>
Some(dmv.originalBackingObject.files
.filter(f => f.name.endsWith(".properties"))
.map(f => new PropertiesMutableView(f, dmv.parent)))
case pmv: ProjectMutableView =>
Some(pmv.originalBackingObject.allFiles
.filter(f => f.name.endsWith(".properties"))
.map(f => new PropertiesMutableView(f, pmv)))
}
}
}
| atomist/rug | src/main/scala/com/atomist/rug/kind/properties/PropertiesType.scala | Scala | gpl-3.0 | 1,162 |
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.hyperledger.network.flows
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
import akka.stream.{Attributes, FlowShape, Inlet, Outlet}
import org.hyperledger.common.LoggedHyperLedgerException
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}
object AsyncStatefulStage {
type DataProcessor[M, S] = PartialFunction[M, S => Future[(S, List[M])]]
def noop[M, S]: DataProcessor[M, S] = { case _ => s => Future.successful((s, Nil)) }
}
abstract class AsyncStatefulStage[M, S](startingState: S)(implicit ec: ExecutionContext)
extends GraphStage[FlowShape[M, M]] {
import AsyncStatefulStage._
val in = Inlet[M]("in")
val out = Outlet[M]("out")
def inputProcessor: AsyncStatefulStage.DataProcessor[M, S]
lazy val liftedProcessor = inputProcessor.lift
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
var result = List.empty[M]
var state = startingState
var holdingUpstream = false
override def preStart(): Unit = tryPull(in)
setHandler(out, eagerTerminateOutput)
setHandler(in, new InHandler {
override def onPush(): Unit = {
val elem = grab(in)
(inputProcessor orElse noop)(elem)(state).onComplete(actionResultCallback.invoke)
}
})
val actionResultCallback = getAsyncCallback[Try[(S, List[M])]] {
case Failure(ex: LoggedHyperLedgerException) => tryPull(in)
case Failure(ex) => failStage(ex)
case Success((newState, messages)) =>
state = newState
result ++ messages match {
case Nil => tryPull(in)
case msgs :: newResult if !(isClosed(in) || hasBeenPulled(in)) =>
result = newResult
pushIt(msgs)
case newResult => result = newResult
}
}
private def pushIt(message: M) = {
push(out, message)
if (isClosed(in)) {
completeStage()
} else if (result.isEmpty && !hasBeenPulled(in)) {
tryPull(in)
holdingUpstream = false
}
}
}
override val shape = FlowShape(in, out)
}
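// Editor's illustrative sketch (not part of the original file): a concrete
// stage that numbers each message it echoes, showing how `inputProcessor`
// threads its Int state through the Future-based callback above.
class NumberingEchoStage(implicit ec: ExecutionContext)
  extends AsyncStatefulStage[String, Int](0) {
  override def inputProcessor: AsyncStatefulStage.DataProcessor[String, Int] = {
    case msg => count => Future.successful((count + 1, List(s"#$count $msg")))
  }
}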
| DigitalAssetCom/hlp-candidate | server/network/src/main/scala/org/hyperledger/network/flows/AsyncStatefulStage.scala | Scala | apache-2.0 | 2,741 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.nn.abstractnn.AbstractModule
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.{T, Table}
import scala.reflect.ClassTag
/**
* It is a module to perform matrix vector multiplication on two mini-batch inputs,
* producing a mini-batch.
*
* @param trans whether make matrix transpose before multiplication
*/
@SerialVersionUID(- 555327285289166316L)
class MV[T: ClassTag](val trans: Boolean = false)
(implicit ev: TensorNumeric[T]) extends AbstractModule[Table, Tensor[T], T] {
gradInput = T(Tensor[T], Tensor[T]())
private def checkInputFormat(input: Table): (Tensor[T], Tensor[T]) = {
require(input.length() == 2 && input(1).isInstanceOf[Tensor[T]] &&
input(2).isInstanceOf[Tensor[T]], "Input must be two tensors")
val m: Tensor[T] = input(1)
val v: Tensor[T] = input(2)
require(m.dim() == 2 || m.dim() == 3, "input matrix must be 2D or 3D")
require(v.dim() == 1 || v.dim() == 2, "input vector must be 1D or 2D")
(m, v)
}
override def updateOutput(input: Table): Tensor[T] = {
var (m, v) = checkInputFormat(input)
if (m.dim() == 2) {
require(v.dim() == 1, "vector must be 1D")
if (trans) {
m = m.transpose(1, 2)
}
require(m.size(2) == v.size(1), "matrix row count and vector length do not match")
output.resize(m.size(1))
output.mv(m, v)
} else {
require(v.dim() == 2, "vector must be 2D (batch dimension)")
require(m.size(1) == v.size(1), "inputs must contain the same number of minibatches")
if (trans) {
m = m.transpose(2, 3)
}
require(m.size(3) == v.size(2), "matrix row count and vector length do not match")
output.resize(m.size(1), m.size(2), 1)
output.bmm(m, v.view(v.size(1), v.size(2), 1)).resize(m.size(1), m.size(2))
}
output
}
override def updateGradInput(input: Table, gradOutput: Tensor[T]): Table = {
val (m, v) = checkInputFormat(input)
gradInput[Tensor[T]](1).resizeAs(m)
gradInput[Tensor[T]](2).resizeAs(v)
require(gradOutput.dim() == 1 || gradOutput.dim() == 2,
"arguments must be a 1D or 2D Tensor")
if (gradOutput.dim() == 2) {
require(m.dim() == 3, "matrix must must be 3D (batched)")
require(v.dim() == 2, "vector must be 2D (batched)")
val bdim = m.size(1)
val odim = m.size(2)
val idim = m.size(3)
if (trans) {
gradInput[Tensor[T]](1).bmm(v.view(bdim, odim, 1), gradOutput.view(bdim, 1, idim))
gradInput[Tensor[T]](2).view(bdim, odim, 1).bmm(m, gradOutput.view(bdim, odim, 1))
} else {
gradInput[Tensor[T]](1).bmm(gradOutput.view(bdim, odim, 1), v.view(bdim, 1, idim))
gradInput[Tensor[T]](2).view(bdim, odim, 1).bmm(m.transpose(2, 3), gradOutput.view(bdim,
odim, 1))
}
} else {
require(m.dim() == 2, "matrix must be 2D")
require(v.dim() == 1, "vector must be 1D")
if (trans) {
gradInput[Tensor[T]](1).set(v.clone().resize(v.size(1), 1) *
gradOutput.clone().resize(1, gradOutput.size(1)))
gradInput[Tensor[T]](2).set(m * gradOutput)
} else {
gradInput[Tensor[T]](1).set(gradOutput.clone().resize(gradOutput.size(1), 1) *
v.clone().resize(1, v.size(1)))
gradInput[Tensor[T]](2).set(m.t() * gradOutput)
}
}
gradInput
}
override def toString: String = s"MV()"
override def canEqual(other: Any): Boolean = other.isInstanceOf[MV[T]]
override def equals(other: Any): Boolean = other match {
case that: MV[T] =>
super.equals(that) &&
(that canEqual this) &&
trans == that.trans
case _ => false
}
override def hashCode(): Int = {
val state = Seq(super.hashCode(), trans)
state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
}
override def clearState(): MV.this.type = {
super.clearState()
gradInput[Tensor[T]](1).set()
gradInput[Tensor[T]](2).set()
this
}
}
object MV {
def apply[@specialized(Float, Double) T: ClassTag](
trans: Boolean = false)(implicit ev: TensorNumeric[T]) : MV[T] = {
new MV[T](trans)
}
}
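// Editor's illustrative sketch (not part of the original file): a minimal
// forward pass through the non-batched (2D matrix, 1D vector) branch above,
// assuming the standard BigDL Tensor API used elsewhere in this file.
object MVExample {
  def main(args: Array[String]): Unit = {
    val mv = MV[Float]()               // trans = false
    val m = Tensor[Float](2, 3).rand() // 2x3 matrix, uniform random
    val v = Tensor[Float](3).rand()    // length-3 vector
    println(mv.forward(T(m, v)))       // length-2 vector equal to m * v
  }
}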
| psyyz10/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/MV.scala | Scala | apache-2.0 | 4,898 |
package org.aprsdroid.app
import _root_.android.content.Context
import _root_.android.location.{Location, LocationManager}
import _root_.android.media.AudioManager
import _root_.android.preference.PreferenceManager
class PrefsWrapper(val context : Context) {
val prefs = PreferenceManager.getDefaultSharedPreferences(context)
// wrap the "dumb" methods
def getString(key : String, defValue : String) = prefs.getString(key, defValue)
def getBoolean(key : String, defValue : Boolean) = prefs.getBoolean(key, defValue)
// safely read integers
def getStringInt(key : String, defValue : Int) = {
try { prefs.getString(key, null).trim.toInt } catch { case _ : Throwable => defValue }
}
	// safely read floats
def getStringFloat(key : String, defValue : Float) = {
try { prefs.getString(key, null).trim.toFloat } catch { case _ : Throwable => defValue }
}
// return commonly used prefs
def getCallsign() = prefs.getString("callsign", "").trim().toUpperCase()
def getPasscode() = prefs.getString("passcode", "") match {
case "" => "-1"
case s => s
}
def getSsid() = getString("ssid", "10")
def getCallSsid() = AprsPacket.formatCallSsid(getCallsign(), getSsid())
def toggleBoolean(name : String, default : Boolean) = {
val new_val = !prefs.getBoolean(name, default)
android.util.Log.d("toggleBoolean", name + "=" + new_val)
setBoolean(name, new_val)
}
def setBoolean(name : String, new_val : Boolean) = {
prefs.edit().putBoolean(name, new_val).commit()
new_val
}
def set(name : String, new_val : String) = {
prefs.edit().putString(name, new_val).commit()
new_val
}
def getShowObjects() = prefs.getBoolean("show_objects", true)
def getShowSatellite() = prefs.getBoolean("show_satellite", false)
def getShowAge() = getStringInt("show_age", 30)*60L*1000
// get the array index for a given list pref
def getListItemIndex(pref : String, default : String, values : Int) = {
android.util.Log.d("getLII", getString(pref, default))
android.util.Log.d("getLII", "values: " + context.getResources().getStringArray(values).mkString(" "))
context.getResources().getStringArray(values).indexOf(getString(pref, default))
}
def getListItemName(pref : String, default : String, values : Int, names : Int) = {
val id = getListItemIndex(pref, default, values)
android.util.Log.d("getLIN", "id is " + id)
if (id < 0)
"<not in list>"
else
context.getResources().getStringArray(names)(id)
}
def getLocationSourceName() = {
getListItemName("loc_source", LocationSource.DEFAULT_CONNTYPE,
R.array.p_locsource_ev, R.array.p_locsource_e)
}
def getBackendName() = {
val proto = getListItemName("proto", AprsBackend.DEFAULT_PROTO,
R.array.p_conntype_ev, R.array.p_conntype_e)
val link = AprsBackend.defaultProtoInfo(this).link
link match {
case "aprsis" => "%s, %s".format(proto, getListItemName(link, AprsBackend.DEFAULT_CONNTYPE, R.array.p_aprsis_ev, R.array.p_aprsis_e))
case "link" => "%s, %s".format(proto, getListItemName(link, AprsBackend.DEFAULT_CONNTYPE, R.array.p_link_ev, R.array.p_link_e))
case _ => proto
}
}
// this is actually a hack!
def getVersion() = context.getString(R.string.build_version).split(" ").take(2).mkString(" ")
def getLoginString() = AprsPacket.formatLogin(getCallsign(), getSsid(),
getPasscode(), getVersion())
def getFilterString(service : AprsService) : String = {
val filterdist = getStringInt("tcp.filterdist", 50)
val userfilter = getString("tcp.filter", "")
val lastloc = try {
val locMan = service.getSystemService(Context.LOCATION_SERVICE).asInstanceOf[LocationManager]
AprsPacket.formatRangeFilter(
locMan.getLastKnownLocation(PeriodicGPS.bestProvider(locMan)), filterdist)
} catch {
case e : IllegalArgumentException => ""
}
if (filterdist == 0) return " filter %s %s".format(userfilter, lastloc)
else return " filter m/%d %s %s".format(filterdist, userfilter, lastloc)
}
def getProto() = getString("proto", "aprsis")
def getAfskHQ() = getBoolean("afsk.hqdemod", true)
def getAfskBluetooth() = getBoolean("afsk.btsco", false) && getAfskHQ()
def getAfskOutput() = if (getAfskBluetooth()) AudioManager.STREAM_VOICE_CALL else getStringInt("afsk.output", 0)
}
| ge0rg/aprsdroid | src/PrefsWrapper.scala | Scala | gpl-2.0 | 4,240 |
import sbt._, Keys._
object CleanShadedPlugin extends AutoPlugin {
override def requires = plugins.IvyPlugin
override def trigger = allRequirements
object autoImport {
val cleanCacheIvyDirectory: SettingKey[File] = settingKey[File]("")
val cleanCache: InputKey[Unit] = inputKey[Unit]("")
val cleanCacheFiles: InputKey[Seq[File]] = inputKey[Seq[File]]("")
val cleanLocal: InputKey[Unit] = inputKey[Unit]("")
val cleanLocalFiles: InputKey[Seq[File]] = inputKey[Seq[File]]("")
}
import autoImport._
object CleanShaded {
import sbt.complete.Parser
import sbt.complete.DefaultParsers._
final case class ModuleParam(organization: String, name: Option[String])
def parseParam: Parser[Option[ModuleParam]] =
((parseOrg ~ parseName.?) map {
case o ~ n => ModuleParam(o, n)
}).?
private def parseOrg: Parser[String] =
    (Space ~> token(StringBasic.examples("\"organization\"")))
private def parseName: Parser[String] =
(Space ~> token(token("%") ~> Space ~> StringBasic.examples("\\"name\\"")))
def query(base: File, param: Option[ModuleParam], org: String, name: String): Seq[File] =
(param match {
case None => base ** ("*" + org + "*") ** ("*" + name + "*")
case Some(ModuleParam("*", None)) => base ** "*"
case Some(ModuleParam(o, None | Some("*"))) => base ** ("*" + o + "*") ** "*"
case Some(ModuleParam(o, Some(n))) => base ** ("*" + o + "*") ** ("*" + n + "*")
}).get
}
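  // Editor's note (illustrative): from the sbt shell the input tasks accept an
  // optional "organization [% name]" argument, parsed by parseParam above, e.g.
  //   > cleanCache                      // this project's organization/name
  //   > cleanCache com.example          // anything cached under com.example
  //   > cleanLocal com.example % mylib  // one module in the local ivy repo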
override def projectSettings = Seq(
cleanCacheIvyDirectory := ivyPaths.value.ivyHome getOrElse (Path.userHome / ".ivy2"),
cleanCache := IO.delete(cleanCacheFiles.evaluated),
cleanLocal := IO.delete(cleanLocalFiles.evaluated),
cleanCacheFiles := {
val base = cleanCacheIvyDirectory.value / "cache"
val param = CleanShaded.parseParam.parsed
CleanShaded.query(base, param, organization.value, moduleName.value)
},
cleanLocalFiles := {
val base = cleanCacheIvyDirectory.value / "local"
val param = CleanShaded.parseParam.parsed
CleanShaded.query(base, param, organization.value, moduleName.value)
}
)
}
| envisia/sbt-closure | project/CleanShadedPlugin.scala | Scala | mit | 2267
package nagoyaka
import scalaz.syntax.std.string._
import org.scalacheck.Properties
import org.scalacheck.Gen
import org.scalacheck.Prop.forAll
object FizzBuzzSpec extends Properties("FizzBuzz") {
val inverseInput = Gen.oneOf(
Array("fizz"),
Array("buzz" ),
Array("fizz", "buzz"),
Array("buzz", "fizz"),
Array("fizz", "buzz", "fizz"),
Array("fizz", "fizz"),
Array("fizz", "fizz", "buzz")
)
property("inverse") = forAll (inverseInput) { (input: Array[String]) =>
val actual =
FizzBuzz.inverseFizzBuzz(input)
.get()
.map(FizzBuzz.toFizzBuzz(_))
.filter(!_.parseInt.isSuccess)
actual sameElements input
}
}
| pocketberserker/nagoyaka-java-test | src/test/scala/nagoyaka/FizzBuzzSpec.scala | Scala | mit | 714 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.action.ws.fsm
final class WsClosedState(fsm: WsFsm) extends WsState(fsm) {
override protected def remainingReconnects: Int = 0
}
| gatling/gatling | gatling-http/src/main/scala/io/gatling/http/action/ws/fsm/WsClosedState.scala | Scala | apache-2.0 | 775 |
package fr.univ_lille.cristal.emeraude.chasqui.tests
import akka.testkit.TestActorRef
import fr.univ_lille.cristal.emeraude.chasqui.core.NodeImpl
import fr.univ_lille.cristal.emeraude.chasqui.core.synchronization.GlobalSynchronizerWithLocalQueueSingletonActor
import fr.univ_lille.cristal.emeraude.chasqui.core.synchronization.GlobalSynchronizerWithLocalQueueStrategy.RegisterNode
/**
* Created by guille on 10/04/17.
*/
class GlobalSynchronizerWithLocalQueueSpec extends ChasquiBaseSpec {
"A global synchronizer" should "not advance if not all nodes are ready" in {
val actorRef = TestActorRef[GlobalSynchronizerWithLocalQueueSingletonActor]
val node = mock[TestActorRef[NodeImpl]]
actorRef ! RegisterNode(node)
}
}
| guillep/chasqui | src/test/scala/fr/univ_lille/cristal/emeraude/chasqui/tests/GlobalSynchronizerWithLocalQueueSpec.scala | Scala | mit | 740 |
package rds
import com.amazonaws.auth.{InstanceProfileCredentialsProvider, EnvironmentVariableCredentialsProvider}
import com.amazonaws.services.rds.AmazonRDSClient
import com.amazonaws.services.rds.model._
import play.api.{Play, Logger}
import scala.collection.JavaConverters._
import play.api.Play.current
import scala.util.{Failure, Success, Try}
object StagingHelper {
def waitForReady(rdsClient: AmazonRDSClient, instanceName: String): Boolean = {
val sleepTime: Long = 30
var count: Int = 0
var renameReady = false
Logger.info("Waiting for instance to be active...")
Thread.sleep(30 * 1000) // It can take a while for status renaming to register
Logger.debug("Done with initial sleep")
while (!renameReady) {
val status = try {
val request: DescribeDBInstancesRequest = new DescribeDBInstancesRequest()
request.setDBInstanceIdentifier(instanceName)
val result = rdsClient.describeDBInstances(request)
result.getDBInstances.get(0).getDBInstanceStatus
} catch {
case none: DBInstanceNotFoundException =>
Logger.debug("No instance found yet...")
"unavailable"
case e: Exception => throw new Exception(e)
}
if (status.equalsIgnoreCase("available")) {
renameReady = true
}
else {
count = count + 1
Thread.sleep(sleepTime * 1000)
}
if (count > 200) {
throw new Exception("Active wait took too long.")
}
}
Logger.info("Instance now active.")
true
}
def refreshStaging: Try[(String, String)] = {
Logger.info("Updating staging db...")
try {
val stagingName = Play.application.configuration.getString("staging.name").get
val stagingMaster = Play.application.configuration.getString("staging.master").get
val stagingOldName = stagingName + "-old"
val stagingAZ = Play.application.configuration.getString("staging.az").get
val stagingClass = Play.application.configuration.getString("staging.class").get
val stagingSubnet = Play.application.configuration.getString("staging.subnet").get
val stagingSec = List(Play.application.configuration.getString("staging.vpcSecGroupId").get).asJava
// Locally you will need to have environment keys but production uses machine role
// https://docs.aws.amazon.com/AWSSdkDocsJava/latest/DeveloperGuide/java-dg-roles.html
val rdsClient: AmazonRDSClient =
if (Play.isDev)
new AmazonRDSClient(new EnvironmentVariableCredentialsProvider())
else
new AmazonRDSClient(new InstanceProfileCredentialsProvider())
val request: DescribeDBSnapshotsRequest = new DescribeDBSnapshotsRequest()
request.setDBInstanceIdentifier(stagingMaster)
      // The API does not support sorting, so fetch up to 100 snapshots and sort client-side for the latest.
request.setMaxRecords(100)
val result: DescribeDBSnapshotsResult = rdsClient.describeDBSnapshots(request)
val list: java.util.List[DBSnapshot] = result.getDBSnapshots
Logger.debug(s"list length = ${list.size}")
if (list.isEmpty) {
Success("error" -> "No snapshots found.")
} else {
// Sort by latest
val snapshot = list.asScala.sortBy(-_.getSnapshotCreateTime.getTime).head
Logger.debug(s"Snapshot name: ${snapshot.getDBSnapshotIdentifier}")
// Rename staging if exists
val instanceRequest: DescribeDBInstancesRequest = new DescribeDBInstancesRequest()
instanceRequest.setDBInstanceIdentifier(stagingName)
try {
val response = rdsClient.describeDBInstances(instanceRequest)
if (response.getDBInstances.size() > 0) {
// Assumption here is that renaming is quicker than deleting
Logger.info("Renaming existing staging to old...")
val modifyDBInstanceRequest: ModifyDBInstanceRequest = new ModifyDBInstanceRequest()
modifyDBInstanceRequest.setDBInstanceIdentifier(stagingName)
modifyDBInstanceRequest.setNewDBInstanceIdentifier(stagingOldName)
modifyDBInstanceRequest.setApplyImmediately(true)
rdsClient.modifyDBInstance(modifyDBInstanceRequest)
val waitOver = waitForReady(rdsClient, stagingOldName)
// Then delete old staging
if (waitOver) {
Logger.info("Deleting renamed old staging.")
val deleteDBInstanceRequest = new DeleteDBInstanceRequest()
deleteDBInstanceRequest.setDBInstanceIdentifier(stagingOldName)
deleteDBInstanceRequest.setSkipFinalSnapshot(true)
rdsClient.deleteDBInstance(deleteDBInstanceRequest)
}
}
} catch {
case none: DBInstanceNotFoundException => Logger.info("No staging found.")
}
// Restore latest snapshot as staging
Logger.info("Restoring staging snapshot...")
val restoreRequest = new RestoreDBInstanceFromDBSnapshotRequest()
restoreRequest.setDBInstanceIdentifier(stagingName)
restoreRequest.setDBSnapshotIdentifier(snapshot.getDBSnapshotIdentifier)
restoreRequest.setAutoMinorVersionUpgrade(true)
restoreRequest.setAvailabilityZone(stagingAZ)
restoreRequest.setDBInstanceClass(stagingClass)
restoreRequest.setMultiAZ(false)
restoreRequest.setPubliclyAccessible(true)
restoreRequest.setStorageType("gp2") // SSD
restoreRequest.setDBSubnetGroupName(stagingSubnet)
rdsClient.restoreDBInstanceFromDBSnapshot(restoreRequest)
// Set backup window to 0
val waitOver = waitForReady(rdsClient, stagingName)
if (waitOver) {
Logger.info("Modifying snapshot to desired settings...")
val modifyDBInstanceRequest: ModifyDBInstanceRequest = new ModifyDBInstanceRequest()
modifyDBInstanceRequest.setDBInstanceIdentifier(stagingName)
modifyDBInstanceRequest.setBackupRetentionPeriod(0)
modifyDBInstanceRequest.setApplyImmediately(true)
modifyDBInstanceRequest.setVpcSecurityGroupIds(stagingSec)
rdsClient.modifyDBInstance(modifyDBInstanceRequest)
}
Success("success" -> "Staging is refreshing now. Monitor AWS for completion.")
}
} catch {
case e: Exception => Failure(e)
}
}
}
| Enalmada/play-rds | module/app/rds/StagingHelper.scala | Scala | apache-2.0 | 6360
package com.tpl.lib.gui
trait ObjectDumping {
protected def dump[T](t: T)(implicit mt: Manifest[T] = null) = "%s: %s".format(t,if (mt == null) "<?>" else mt.toString)
}
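// Editor's usage sketch: with a Manifest in scope the static type is appended.
//
//   new ObjectDumping { println(dump(List(1, 2))) }
//   // prints: List(1, 2): scala.collection.immutable.List[Int]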
| piotrb/hamcraft | src/main/scala/com/tpl/lib/gui/ObjectDumping.scala | Scala | bsd-2-clause | 172 |
// Copyright (c) 2013-2020 Rob Norris and Contributors
// This software is licensed under the MIT License (MIT).
// For more information see LICENSE or https://opensource.org/licenses/MIT
package example
import java.io.File
import cats.effect.{ IO, IOApp }
import cats.effect.syntax.monadCancel._
import cats.syntax.all._
import doobie._
import doobie.implicits._
// JDBC program using the low-level API
object FreeUsage extends IOApp.Simple {
final case class CountryCode(code: String)
def run: IO[Unit] = {
val db = Transactor.fromDriverManager[IO](
"org.h2.Driver", "jdbc:h2:mem:test;DB_CLOSE_DELAY=-1", "sa", ""
)
db.trans.apply(examples.void)
}
def examples: ConnectionIO[String] =
for {
_ <- FC.delay(println("Loading database..."))
_ <- loadDatabase(new File("example/world.sql"))
s <- speakerQuery("English", 10)
_ <- s.traverse(a => FC.delay(println(a)))
} yield "Ok"
def loadDatabase(f: File): ConnectionIO[Boolean] =
FC.prepareStatement("RUNSCRIPT FROM ? CHARSET 'UTF-8'").bracket { ps =>
FC.embed(ps, FPS.setString(1, f.getName) *> FPS.execute)
}(FC.embed(_, FPS.close))
def speakerQuery(s: String, p: Double): ConnectionIO[List[CountryCode]] =
FC.prepareStatement("SELECT COUNTRYCODE FROM COUNTRYLANGUAGE WHERE LANGUAGE = ? AND PERCENTAGE > ?").bracket { ps =>
FC.embed(ps, speakerPS(s, p))
}(FC.embed(_, FPS.close))
def speakerPS(s: String, p: Double): PreparedStatementIO[List[CountryCode]] =
for {
_ <- FPS.setString(1, s)
_ <- FPS.setDouble(2, p)
l <- FPS.executeQuery.bracket { rs =>
FPS.embed(rs, unroll(FRS.getString(1).map(CountryCode(_))))
}(FPS.embed(_, FRS.close))
} yield l
@SuppressWarnings(Array("org.wartremover.warts.Recursion"))
def unroll[A](a: ResultSetIO[A]): ResultSetIO[List[A]] = {
def unroll0(as: List[A]): ResultSetIO[List[A]] =
FRS.next >>= {
case false => as.pure[ResultSetIO]
case true => a >>= { a => unroll0(a :: as) }
}
unroll0(Nil).map(_.reverse)
}
}
| tpolecat/doobie | modules/example/src/main/scala/example/FreeUsage.scala | Scala | mit | 2,085 |
package com.eevolution.context.dictionary.infrastructure.service.impl
import java.util.UUID
import com.eevolution.context.dictionary.infrastructure.repository.SequenceAuditRepository
import com.eevolution.context.dictionary.infrastructure.service.SequenceAuditService
import com.lightbend.lagom.scaladsl.api.ServiceCall
import com.lightbend.lagom.scaladsl.persistence.PersistentEntityRegistry
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/EmerisScala
* Created by [email protected] , www.e-evolution.com on 21/11/17.
*/
/**
* Sequence Audit Service Implementation
* @param registry
* @param sequenceAuditRepository
*/
class SequenceAuditServiceImpl (registry: PersistentEntityRegistry, sequenceAuditRepository: SequenceAuditRepository) extends SequenceAuditService {
private val DefaultPageSize = 10
override def getAll() = ServiceCall {_ => sequenceAuditRepository.getAll()}
override def getAllByPage(page : Option[Int], pageSize : Option[Int]) = ServiceCall{_ => sequenceAuditRepository.getAllByPage(page.getOrElse(0) , pageSize.getOrElse(DefaultPageSize))}
override def getById(id: Int) = ServiceCall { _ => sequenceAuditRepository.getById(id)}
override def getByUUID(uuid: UUID) = ServiceCall { _ => sequenceAuditRepository.getByUUID(uuid)}
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/service/impl/SequenceAuditServiceImpl.scala | Scala | gpl-3.0 | 2,082 |
/*
* Copyright 2014-2020 Rik van der Kleij
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package intellij.haskell.psi
trait HaskellQualifiedNameElement extends HaskellCompositeElement {
def getName: String
def getIdentifierElement: HaskellNamedElement
def getQualifierName: Option[String]
}
| rikvdkleij/intellij-haskell | src/main/scala/intellij/haskell/psi/HaskellQualifiedNameElement.scala | Scala | apache-2.0 | 815 |
package com.airbnb.scheduler.jobs
import com.fasterxml.jackson.annotation.JsonProperty
import com.fasterxml.jackson.databind.annotation.JsonDeserialize
import org.joda.time.{Period, Minutes}
import com.airbnb.utils.JobDeserializer
/**
* BaseJob encapsulates job specific information. BaseJob is defined for all tasks within a job.
* At a bare minimum, it contains the command and a default epsilon value. Epsilon is the maximum allowed delay that a
* job may be triggered at - if a job cannot be scheduled within epsilon (e.g. no resources),
* the execution cycle is skipped.
* @author Florian Leibert ([email protected])
*/
//The fact that Job is a trait rather than part of this class is a workaround for Dropwizard's JSON serializer, which
//omits fields defined in superclasses but not those defined in traits.
// It may be surprising that a DependencyBasedJob (DPJ) has an epsilon: If it didn't have an epsilon, and no resources
// were available and a series of DPJ based tasks have been built-up in the queue, they would all be executed
// possibly overflowing the system. Therefore, we also include an epsilon for DPJs.
// Note, that if a SBJ is the root for a DPJ, the SBJ can take an arbitrary amount of time,
// the scheduled time t of the child DPJ, will be determined once the parent completes.
trait BaseJob {
def name: String
def command: String
def epsilon: Period = Minutes.minutes(5).toPeriod
def successCount: Long = 0L
def errorCount: Long = 0L
def executor: String = ""
def executorFlags: String = ""
def retries: Int = 2
def owner: String = ""
def lastSuccess: String = ""
def lastError: String = ""
def async: Boolean = false
def cpus: Double = 0
def disk: Double = 0
def mem: Double = 0
def disabled: Boolean = false
def errorsSinceLastSuccess: Long = 0L
def uris: Seq[String] = List()
def highPriority: Boolean = false
def runAsUser: String = ""
def container: DockerContainer = null
def environmentVariables: Seq[EnvironmentVariable] = List()
def shell: Boolean = true
def arguments: Seq[String] = List()
def softError: Boolean = false
}
@JsonDeserialize(using = classOf[JobDeserializer])
case class ScheduleBasedJob(
@JsonProperty schedule: String,
@JsonProperty override val name: String,
@JsonProperty override val command: String,
@JsonProperty override val epsilon: Period = Minutes.minutes(5).toPeriod,
@JsonProperty override val successCount: Long = 0L,
@JsonProperty override val errorCount: Long = 0L,
@JsonProperty override val executor: String = "",
@JsonProperty override val executorFlags: String = "",
@JsonProperty override val retries: Int = 2,
@JsonProperty override val owner: String = "",
@JsonProperty override val lastSuccess: String = "",
@JsonProperty override val lastError: String = "",
@JsonProperty override val async: Boolean = false,
@JsonProperty override val cpus: Double = 0,
@JsonProperty override val disk: Double = 0,
@JsonProperty override val mem: Double = 0,
@JsonProperty override val disabled: Boolean = false,
@JsonProperty override val errorsSinceLastSuccess: Long = 0L,
@JsonProperty override val uris: Seq[String] = List(),
@JsonProperty override val highPriority: Boolean = false,
@JsonProperty override val runAsUser: String = "",
@JsonProperty override val container: DockerContainer = null,
@JsonProperty scheduleTimeZone : String = "",
@JsonProperty override val environmentVariables: Seq[EnvironmentVariable] = List(),
@JsonProperty override val shell: Boolean = true,
@JsonProperty override val arguments: Seq[String] = List(),
@JsonProperty override val softError: Boolean = false)
extends BaseJob
@JsonDeserialize(using = classOf[JobDeserializer])
case class DependencyBasedJob(
@JsonProperty parents: Set[String],
@JsonProperty override val name: String,
@JsonProperty override val command: String,
@JsonProperty override val epsilon: Period = Minutes.minutes(5).toPeriod,
@JsonProperty override val successCount: Long = 0L,
@JsonProperty override val errorCount: Long = 0L,
@JsonProperty override val executor: String = "",
@JsonProperty override val executorFlags: String = "",
@JsonProperty override val retries: Int = 2,
@JsonProperty override val owner: String = "",
@JsonProperty override val lastSuccess: String = "",
@JsonProperty override val lastError: String = "",
@JsonProperty override val async: Boolean = false,
@JsonProperty override val cpus: Double = 0,
@JsonProperty override val disk: Double = 0,
@JsonProperty override val mem: Double = 0,
@JsonProperty override val disabled: Boolean = false,
@JsonProperty override val errorsSinceLastSuccess: Long = 0L,
@JsonProperty override val uris: Seq[String] = List(),
@JsonProperty override val highPriority: Boolean = false,
@JsonProperty override val runAsUser: String = "",
@JsonProperty override val container: DockerContainer = null,
@JsonProperty override val environmentVariables: Seq[EnvironmentVariable] = List(),
@JsonProperty override val shell: Boolean = true,
@JsonProperty override val arguments: Seq[String] = List(),
@JsonProperty override val softError: Boolean = false)
extends BaseJob
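// Editor's illustrative sketch (not part of the original file): a minimal
// schedule-based job. The schedule string is assumed to follow the ISO 8601
// repeating-interval convention Chronos uses ("Repetitions/Start/Period").
object JobExamples {
  val nightly = ScheduleBasedJob(
    schedule = "R/2014-01-01T00:00:00Z/PT24H",
    name = "nightly-report",
    command = "bin/run-report.sh")
}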
| doronin/chronos | src/main/scala/com/airbnb/scheduler/jobs/Jobs.scala | Scala | apache-2.0 | 5,307 |
package io.continuum.bokeh
case class Selected0d(indices: List[Int] = Nil, glyph: Option[Glyph[_, _]] = None)
case class Selected1d(indices: List[Int] = Nil)
case class Selected2d(indices: List[List[Int]] = Nil)
case class Selected(`0d`: Selected0d = Selected0d(),
`1d`: Selected1d = Selected1d(),
`2d`: Selected2d = Selected2d())
@model abstract class DataSource extends Model {
object column_names extends Field[List[String]]
object selected extends Field[Selected]
object callback extends Field[Callback]
}
@model class ColumnDataSource extends DataSource { source =>
final override val typeName = "ColumnDataSource"
object data extends Field[Map[Symbol, Js.Value]]
// TODO: M should be covariant? See Color vs. NamedColor.
class Column[M[_]: ArrayLike, T:Json.Writer]
(val name: Symbol, private var _value: M[T])
(implicit fmt: Json.Writer[M[T]]) {
this := _value
def value: M[T] = _value // TODO: fmt.reads(source.data.value(name))
def :=(value: M[T]): Unit = data <<= (_ + (name -> fmt.write(_value)))
val parent: source.type = source
}
def column[M[_], T](value: M[T]): Column[M, T] = macro ColumnMacro.columnImpl[M, T]
def values[T](values: T*): Column[Seq, T] = macro ColumnMacro.valuesImpl[T]
}
private[bokeh] object ColumnMacro {
import scala.reflect.macros.blackbox.Context
def columnImpl[M[_], T](c: Context { type PrefixType = ColumnDataSource })(value: c.Expr[M[T]])
(implicit ev1: c.WeakTypeTag[M[_]], ev2: c.WeakTypeTag[T]): c.Expr[c.prefix.value.Column[M, T]] = {
import c.universe._
c.Expr[c.prefix.value.Column[M, T]](q"new Column(Symbol(${columnName(c)}), $value)")
}
def valuesImpl[T: c.WeakTypeTag](c: Context { type PrefixType = ColumnDataSource })
(values: c.Expr[T]*): c.Expr[c.prefix.value.Column[Seq, T]] = {
import c.universe._
c.Expr[c.prefix.value.Column[Seq, T]](q"new Column(Symbol(${columnName(c)}), Seq(..$values))")
}
private def columnName(c: Context): String = {
Macros.definingValName(c) getOrElse {
c.abort(c.enclosingPosition, "column must be directly assigned to a val, such as `val x1 = column(List(1.0, 2.0, 3.0))`")
}
}
}
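// Editor's usage sketch, mirroring the constraint enforced by ColumnMacro:
// a column must be assigned directly to a val so the macro can derive its name.
//
//   object source extends ColumnDataSource {
//     val x = column(List(1.0, 2.0, 3.0)) // registered under Symbol("x")
//     val y = values(4.0, 5.0, 6.0)       // registered under Symbol("y")
//   }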
@model abstract class RemoteSource extends DataSource {
object data_url extends Field[String]
object polling_interval extends Field[Int]
}
@model class AjaxDataSource extends RemoteSource {
object method extends Field[HTTPMethod](HTTPMethod.POST)
}
| bokeh/bokeh-scala | bokeh/src/main/scala/models/Sources.scala | Scala | mit | 2,591 |
package com.monsanto.stats.tables.clustering
/*
* The alpha value determines a tradeoff between having more clusters and having clusters that are more
 * homogenous. Alpha is used in the formula that computes the probability of placing an extracted
* TopicVector into a brand new cluster.
*
* The beta is used to add a fake observation for a given vocabulary term---as if we saw that many
* extra MnMs for every color (because that is what we add to every one). The more data
* you have, the bigger beta has to be to have any effect. If you don't have a lot of data, or very
* many categories, 1.0 is a lot for beta.
*
* Beta is used for smoothing in the estimateCSmoothingFirst method.
*/
case class ModelParams(topicVectorSize: Int, alpha: Double, beta: Double)
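// Editor's illustrative instantiation (the values are assumptions, not tuned):
// a 60-term vocabulary, alpha = 1.0 trading cluster count against homogeneity,
// and a small beta = 0.1 smoothing pseudo-count per term.
//
//   val params = ModelParams(topicVectorSize = 60, alpha = 1.0, beta = 0.1)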
| MonsantoCo/chinese-restaurant-process | src/main/scala/com/monsanto/stats/tables/clustering/ModelParams.scala | Scala | bsd-3-clause | 775 |
package edu.illinois.wala.classLoader
import edu.illinois.wala.Facade._
trait Wrapper {
implicit def wrapC(c: C) = new RichC(c)
implicit def wrapM(m: M) = new RichM(m)
implicit def wrapF(f: F) = new RichF(f)
implicit def unwrapPC(pc: ProgramCounter) = pc.getProgramCounter()
}
| cos/WALAFacade | src/main/scala/edu/illinois/wala/classLoader/Wrapper.scala | Scala | epl-1.0 | 287
package com.harborx.api
import com.harborx.api.modules.CoreModule
import play.api.{Configuration, Environment}
trait SimpleEnvConfigProvider {
this: CoreModule =>
// dependence
override def configuration: Configuration = Configuration.load(environment)
override def environment: Environment = Environment.simple()
}
| harborx/play-di-example | play-macwire/test/com/harborx/api/SimpleEnvConfigProvider.scala | Scala | mit | 328 |
package com.blinkbox.books.agora.catalogue.book
import com.blinkbox.books.agora.catalogue.app.LinkHelper
import scala.concurrent.Future
import com.blinkbox.books.spray.v1.{Link => V1Link, Image => V1Image, ListPage, pageLink2Link}
import com.blinkbox.books.logging.DiagnosticExecutionContext
import scala.concurrent.ExecutionContext
import java.util.concurrent.Executors
import com.blinkbox.books.catalogue.common.Events.Book
import com.blinkbox.books.catalogue.common._
import com.blinkbox.books.spray.{Page, Paging, SortOrder}
import org.joda.time.DateTime
import org.joda.time.format.DateTimeFormat
case class BookList(books: List[Book], total: Int)
trait BookDao {
def getBookByIsbn(isbn: String): Future[Option[Book]]
def getBooks(isbns: List[String]): Future[List[Book]]
def getBooksByContributor(id: String, minDate: Option[DateTime], maxDate: Option[DateTime], offset: Int, count: Int, sortField: String, sortDescending: Boolean): Future[BookList]
def getRelatedBooks(isbn: String, offset: Int, count: Int): Future[BookList]
def getDistributionStatus(isbn: String): Future[Option[DistributionStatus]]
}
trait BookService {
def getBookByIsbn(isbn: String): Future[Option[BookRepresentation]]
def getBookSynopsis(isbn: String): Future[Option[BookSynopsis]]
def getBooks(isbns: Iterable[String], page: Page): Future[ListPage[BookRepresentation]]
def getBooksByContributor(id: String, minPubDate: Option[DateTime], maxPubDate: Option[DateTime], page: Page, order: SortOrder): Future[ListPage[BookRepresentation]]
def getRelatedBooks(isbn: String, page: Page): Future[ListPage[BookRepresentation]]
}
object BookService {
val idParam = "id"
val contributorParam = "contributor"
val minPubDateParam = "minPublicationDate"
val maxPubDateParam = "maxPublicationDate"
val dateTimeFormat = DateTimeFormat.forPattern("yyyy-MM-dd")
}
class DefaultBookService(dao: BookDao, linkHelper: LinkHelper) extends BookService {
implicit val executionContext = DiagnosticExecutionContext(ExecutionContext.fromExecutor(Executors.newCachedThreadPool))
private def isRealm(list: List[Classification], realm: String = "type", id: String) = list.exists(c => c.realm.equals(realm) && c.id.equals(id))
private val isStatic = (uri: Uri) => uri.`type`.equals("static")
private def toBookRepresentation(book: Book): BookRepresentation = {
def getWithException[T](from: Option[T], exceptionMessage: String): T = from.getOrElse(throw new IllegalArgumentException(exceptionMessage))
val media = getWithException(book.media, "'media' missing.")
val publicationDate = book.dates.map(_.publish).flatten
val sample = getSampleUri(book)
BookRepresentation(
isbn = book.isbn,
title = book.title,
publicationDate = getWithException(publicationDate, "'publicationDate' missing."),
sampleEligible = sample.isDefined,
images = getImages(media),
links = Some(generateLinks(book, media, sample))
)
}
private def getImages(media: Media): List[V1Image] = {
media.images
.filter(image => isRealm(image.classification, id="front_cover"))
.flatMap(image => image.uris)
.filter(isStatic)
.map(uri => V1Image("urn:blinkboxbooks:image:cover", uri.uri))
}
private def getSampleUri(book: Book): Option[Uri] = {
book.media.get.epubs
.filter(epub => isRealm(epub.classification, id="sample"))
.flatMap(epub => epub.uris)
.find(isStatic)
}
private def generateLinks(book: Book, media: Media, sample: Option[Uri]) : List[V1Link] = {
val bookLinks = List(
linkHelper.linkForBookSynopsis(book.isbn),
linkHelper.linkForPublisher(123, book.publisher.get), // TODO - publisher ID!!!
linkHelper.linkForBookPricing(book.isbn)
)
val contributorLinks = for (c <- book.contributors) yield linkHelper.linkForContributor(c.id, c.displayName)
val sampleLink = sample.map(uri => linkHelper.linkForSampleMedia(uri.uri))
bookLinks ++ contributorLinks ++ sampleLink
}
private def toListPage(books: List[Book], total: Int, page: Page, path: String, params: Option[Seq[(String, String)]]): ListPage[BookRepresentation] = {
val links = if(total > page.count) {
val pageLinks = Paging.links(Some(total), page.offset, page.count, linkHelper.externalUrl.path.toString + path, params, includeSelf=false)
Some(pageLinks.toList.map(pageLink2Link))
}
else {
None
}
ListPage(total, page.offset, books.size, books.map(toBookRepresentation), links)
}
override def getBookByIsbn(isbn: String): Future[Option[BookRepresentation]] = {
dao.getBookByIsbn(isbn).map(_.map(book => toBookRepresentation(book)))
}
override def getBookSynopsis(isbn: String): Future[Option[BookSynopsis]] = {
def toSynopsis(book: Book) = {
val isMainDescription = (desc: OtherText) => isRealm(desc.classification, "source", "Main description")
val list = for(desc <- book.descriptions; if isMainDescription(desc)) yield desc.content
if(list.isEmpty) "" else list.head
}
dao.getBookByIsbn(isbn).map(_.map(book => BookSynopsis(book.isbn, toSynopsis(book))))
}
override def getBooks(isbns: Iterable[String], page: Page): Future[ListPage[BookRepresentation]] = {
val slice = isbns.slice(page.offset, page.offset + page.count).toList
val params = Some(isbns.toSeq.map(isbn => (BookService.idParam, isbn)))
dao.getBooks(slice) map { books => toListPage(books, isbns.size, page, linkHelper.bookPath, params) }
}
override def getBooksByContributor(id: String, minPubDate: Option[DateTime], maxPubDate: Option[DateTime], page: Page, order: SortOrder): Future[ListPage[BookRepresentation]] = {
def dateQueryParam(param: String, date: Option[DateTime]):Option[(String, String)] = date.map(d => (param, BookService.dateTimeFormat.print(d)))
val params = Seq(
Some((BookService.contributorParam, id)),
dateQueryParam(BookService.minPubDateParam, minPubDate),
dateQueryParam(BookService.maxPubDateParam, maxPubDate)
)
val res = dao.getBooksByContributor(id, minPubDate, maxPubDate, page.offset, page.count, order.field, order.desc)
res map { bookList => toListPage(bookList.books, bookList.total, page, linkHelper.bookPath, Some(params.flatten ++ order.asQueryParams)) }
}
override def getRelatedBooks(isbn: String, page: Page): Future[ListPage[BookRepresentation]] = {
val res = dao.getRelatedBooks(isbn, page.offset, page.count)
res map { bookList => ListPage(bookList.total, page.offset, page.count, bookList.books.map(toBookRepresentation), None) }
}
}
| blinkboxbooks/catalogue-v2.scala | catalogue2-service-public/src/main/scala/com/blinkbox/books/agora/catalogue/book/BookService.scala | Scala | mit | 6,643 |
/**
* Copyright (c) 2012 Petr Kozelek <[email protected]>
*
* The full copyright and license information is presented
* in the file LICENSE that was distributed with this source code.
*/
package mql
object Todo {
def apply(): Nothing = apply("")
def apply(message: String): Nothing = throw new UnsupportedOperationException("Not implemented: TODO " + message)
}
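// Editor's usage sketch: because Todo() returns Nothing, it can stand in for
// an expression of any type while a member is unimplemented.
//
//   def exportReport(): String = Todo("CSV export")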
| footcha/MQL | src/main/scala/mql/Todo.scala | Scala | bsd-3-clause | 378 |
package pl.combosolutions.backup.psm.repositories
import pl.combosolutions.backup.{ Cleaner, Async }
import pl.combosolutions.backup.psm.ImplementationPriority._
import pl.combosolutions.backup.psm.ImplementationResolver
import pl.combosolutions.backup.psm.PsmExceptionMessages.NoRepositoriesAvailable
import pl.combosolutions.backup.psm.elevation.{ ElevationMode, ObligatoryElevationMode }
import pl.combosolutions.backup.psm.repositories.posix.linux.AptRepositoriesServiceComponent
import RepositoriesServiceComponentImpl.resolve
trait RepositoriesService {
val repositoriesAvailable: Boolean
val repositoriesPriority: ImplementationPriority
// format: OFF
type Repositories = List[Repository]
def obtainRepositories(implicit withElevation: ElevationMode, cleaner: Cleaner): Async[Repositories]
def addRepositories(repositories: Repositories)
(implicit withElevation: ObligatoryElevationMode, cleaner: Cleaner): Async[Boolean]
def removeRepositories(repositories: Repositories)
(implicit withElevation: ObligatoryElevationMode, cleaner: Cleaner): Async[Boolean]
def updateRepositories(implicit withElevation: ObligatoryElevationMode, cleaner: Cleaner): Async[Boolean]
type Packages = List[Package]
def areAllInstalled(packages: Packages)(implicit withElevation: ElevationMode, cleaner: Cleaner): Async[Boolean]
def installAll(packages: Packages)
(implicit withElevation: ObligatoryElevationMode, cleaner: Cleaner): Async[Boolean]
// format: ON
}
trait RepositoriesServiceComponent {
def repositoriesService: RepositoriesService
}
// $COVERAGE-OFF$ Implementation resolution should be checked on each implementation level
object RepositoriesServiceComponentImpl extends ImplementationResolver[RepositoriesService] {
override lazy val implementations = Seq(
// Linux repositories
AptRepositoriesServiceComponent.repositoriesService
)
override lazy val notFoundMessage = NoRepositoriesAvailable
override def byFilter(service: RepositoriesService): Boolean = service.repositoriesAvailable
override def byPriority(service: RepositoriesService): ImplementationPriority = service.repositoriesPriority
}
trait RepositoriesServiceComponentImpl extends RepositoriesServiceComponent {
override lazy val repositoriesService = resolve
}
// $COVERAGE-ON$
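// Wiring sketch (illustrative only, not part of the project): mixing the
// component into a consumer object yields the highest-priority available
// RepositoriesService, as resolved by RepositoriesServiceComponentImpl above.
private object RepositoriesWiringExample extends RepositoriesServiceComponentImpl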
| MateuszKubuszok/BackupDSL | modules/psm/src/main/scala/pl/combosolutions/backup/psm/repositories/RepositoriesService.scala | Scala | mit | 2,366 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package connectors
import common.enums.AddressLookupJourneyIdentifier._
import config.{AddressLookupConfiguration, FrontendAppConfig}
import itutil.IntegrationSpecBase
import models.api.{Address, Country}
import play.api.i18n.{Lang, MessagesApi}
import play.api.mvc.Call
import services.AddressLookupService
import support.AppAndStubs
import uk.gov.hmrc.http.{HeaderCarrier, NotFoundException, Upstream5xxResponse}
import play.api.test.Helpers._
class AddressLookupConnectorISpec extends IntegrationSpecBase with AppAndStubs {
val alfConnector: AddressLookupConnector = app.injector.instanceOf[AddressLookupConnector]
val addressLookupService: AddressLookupService = app.injector.instanceOf[AddressLookupService]
implicit val messagesApi: MessagesApi = app.injector.instanceOf[MessagesApi]
implicit val appConfig: FrontendAppConfig = app.injector.instanceOf[FrontendAppConfig]
implicit val messages = messagesApi.preferred(Seq(Lang("en")))
"getting an address out of Address Lookup Frontend" should {
"obtain expected address from JSON response from ALF" when {
"address is found in ALF" in {
given()
.address("addressId", "16 Coniston Court", "Holland road", "UK", "BN3 1JU").isFound
await(alfConnector.getAddress("addressId")) mustBe Address(
line1 = "16 Coniston Court",
line2 = Some("Holland road"),
country = Some(Country(Some("UK"), Some("United Kingdom"))),
postcode = Some("BN3 1JU"),
addressValidated = true
)
}
}
"throw a NotFoundException" when {
"address is not found in ALF" in {
given()
.address("addressId", "16 Coniston Court", "Holland road", "United Kingdom", "BN3 1JU").isNotFound
intercept[NotFoundException] {
await(alfConnector.getAddress("addressId"))
}
}
}
}
"initialising ALF journey" should {
implicit val hc: HeaderCarrier = HeaderCarrier()
val journeyModel = new AddressLookupConfiguration()(appConfig, messagesApi)(homeAddress, Call("GET", "continueUrl"))
"return a URL for redirecting the user off to ALF" when {
"Location header is present" in {
given()
.alfeJourney.initialisedSuccessfully()
await(alfConnector.getOnRampUrl(journeyModel)) mustBe Call("GET", "continueUrl")
}
}
"throw ALFLocationHeaderNotSetException" when {
"no Location header received from ALF" in {
given()
.alfeJourney.notInitialisedAsExpected()
intercept[ALFLocationHeaderNotSetException] {
await(alfConnector.getOnRampUrl(journeyModel))
}
}
}
"throw Upstream5xxResponse exception" when {
"ALF fails to handle the request" in {
given()
.alfeJourney.failedToInitialise()
intercept[Upstream5xxResponse] {
await(alfConnector.getOnRampUrl(journeyModel))
}
}
}
}
}
| hmrc/vat-registration-frontend | it/connectors/AddressLookupConnectorISpec.scala | Scala | apache-2.0 | 3,556 |
package V2
/**
 * Interpreter for a small language with first-class functions and algebraic data type support.
*/
object DRCFAEInterp extends App {
sealed abstract class DRCFAE
case class Num(n: Int) extends DRCFAE
case class Add(lhs: DRCFAE, rhs: DRCFAE) extends DRCFAE
case class Sub(lhs: DRCFAE, rhs: DRCFAE) extends DRCFAE
case class Mult(lhs: DRCFAE, rhs: DRCFAE) extends DRCFAE
case class Id(name: Symbol) extends DRCFAE
case class Fun(param: Symbol, body: DRCFAE) extends DRCFAE
case class App(funExpr: DRCFAE, argExpr: DRCFAE) extends DRCFAE
case class If0(test: DRCFAE, thenBody: DRCFAE, elseBody: DRCFAE) extends DRCFAE
case class Rec(name: Symbol, namedExpr: DRCFAE, body: DRCFAE) extends DRCFAE
case class Ctor(name: Symbol, args: List[DRCFAE]) extends DRCFAE
case class Match(expr: DRCFAE, cases: List[(Symbol, List[Symbol], DRCFAE)]) extends DRCFAE
implicit def symbolToExpr(symbol: Symbol) = Id(symbol)
implicit def intToExpr(n: Int) = Num(n)
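  // With the implicits above, terms can be written tersely:
  // e.g. Add(1, 'x) elaborates to Add(Num(1), Id('x)).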
// Allows us to write "Ctor('x)" instead of "Ctor('x, List())"
object Ctor {
def apply(name: Symbol): Ctor = Ctor(name, List())
}
// Values override toString for a bit nicer printing
sealed abstract class Val
case class NumV(n: Int) extends Val {
override def toString = n.toString
}
case class CtorV(name: Symbol, args: List[Val]) extends Val
case class ClosureV(param: Symbol, body: DRCFAE, env: Env) extends Val {
override def toString = "(" + param.toString + ") => {" + body.toString + "}"
}
type Env = scala.collection.Map[Symbol, Val]
def interp(expr: DRCFAE, env: Env = Map.empty): Val = expr match {
case Num(n) => NumV(n)
case Add(lhs, rhs) =>
val lhsV = interp(lhs, env)
val rhsV = interp(rhs, env)
(lhsV, rhsV) match {
case (NumV(n1), NumV(n2)) => NumV(n1 + n2)
case _ => sys.error("can only add numbers, but got: " +(lhsV, rhsV))
}
case Sub(lhs, rhs) =>
val lhsV = interp(lhs, env)
val rhsV = interp(rhs, env)
(lhsV, rhsV) match {
case (NumV(n1), NumV(n2)) => NumV(n1 - n2)
case _ => sys.error("can only subtract numbers, but got: " +(lhsV, rhsV))
}
case Mult(lhs, rhs) =>
val lhsV = interp(lhs, env)
val rhsV = interp(rhs, env)
(lhsV, rhsV) match {
case (NumV(n1), NumV(n2)) => NumV(n1 * n2)
case _ => sys.error("can only multiply numbers, but got: " +(lhsV, rhsV))
}
case Id(name) => env(name)
case Fun(param, body) => ClosureV(param, body, env)
case App(funExpr, argExpr) =>
interp(funExpr, env) match {
case ClosureV(funParam, funBody, funEnv) =>
interp(funBody, funEnv + (funParam -> interp(argExpr, env)))
case v => sys.error("can only apply functions, but got: " + v)
}
case If0(test, thenBody, elseBody) =>
interp(test, env) match {
case NumV(n) => interp(if (n == 0) thenBody else elseBody, env)
case v => sys.error("can only test numbers, but got: " + v)
}
case Rec(name, namedExpr, body) =>
val recEnv = collection.mutable.Map() ++ env
recEnv += name -> interp(namedExpr, recEnv)
interp(body, recEnv)
case Ctor(name, args) =>
CtorV(name, args.map(interp(_, env)))
case Match(expr, cases) => interp(expr, env) match {
case CtorV(name, argsV) => cases.collectFirst {
        // partial function: collectFirst picks the first case whose name matches
        case (caseName, args, caseExpr) if name == caseName => interp(caseExpr, env ++ args.zip(argsV))
} match {
case Some(value) => value
case _ => sys.error("No matching case found")
}
case _ => sys.error("Can only match ctor.")
}
}
val trueV = Ctor('true)
val falseV = Ctor('false)
val zero = Ctor('zero)
val one = Ctor('succ, List(zero))
val two = Ctor('succ, List(one))
val nil = Ctor('nil)
val oneZeroList = Ctor('cons, List(one, Ctor('cons, List(zero, nil))))
val countdownList = Ctor('cons, List(two, oneZeroList))
val isNotZero = Fun('x, Match('x, List(
('zero, List(), falseV),
('succ, List('prev), trueV)
)))
val filterNatList = Rec('filter,
Fun('list, Match('list, List(
('nil, List(), nil),
('cons, List('x, 'xs), Match(App(isNotZero, 'x), List(
('false, List(), App('filter, 'xs)),
('true, List(), Ctor('cons, List('x, App('filter, 'xs))))
)
)
)))),
App('filter, countdownList))
println(interp(filterNatList))
}
| Tooa/interpreters | src/V2/DRCFAEInterp.scala | Scala | apache-2.0 | 4,437 |
package eva4s
package util
import language.higherKinds
import scala.collection.Seq
trait Sorter[M[_]] {
def sortWith[A](xs: M[A])(lt: (A,A) => Boolean): M[A] = sort(xs)(Ordering fromLessThan lt)
def sortBy[A,B:Ordering](xs: M[A])(f: A => B): M[A] = sort(xs)(implicitly[Ordering[B]] on f)
def sort[A:Ordering](xs: M[A]): M[A]
}
trait SorterLow {
implicit def SeqSorter[CC[X] <: Seq[X]]: Sorter[CC] = new Sorter[CC] {
override def sort[A:Ordering](xs: CC[A]): CC[A] =
xs.sorted(implicitly[Ordering[A]]).asInstanceOf[CC[A]]
}
}
object Sorter extends SorterLow
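// Usage sketch (illustrative only): the implicit SeqSorter keeps the concrete
// collection type while sorting.
object SorterUsageExample {
  private val vectorSorter = implicitly[Sorter[Vector]]
  val ascending: Vector[Int] = vectorSorter.sort(Vector(3, 1, 2)) // Vector(1, 2, 3)
  val descending: Vector[Int] = vectorSorter.sortWith(ascending)(_ > _) // Vector(3, 2, 1)
}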
| wookietreiber/eva4s-old | core/main/scala/util/Sorter.scala | Scala | gpl-3.0 | 582 |
package gui
import scalafx.collections.ObservableBuffer
/**
* Created by alex on 13/05/15.
*/
trait TitlesModel[TITLE <: Title] {
val titlesModel = ObservableBuffer[TITLE]()
}
| unclealex72/ripper | src/main/scala/gui/TitlesModel.scala | Scala | mit | 184 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.optim
import com.intel.analytics.bigdl._
import com.intel.analytics.bigdl.dllib.feature.dataset.{MiniBatch, Sample, SampleToMiniBatch}
import com.intel.analytics.bigdl.dllib.models.utils.ModelBroadcast
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.utils.{Engine, MklDnn}
import com.intel.analytics.bigdl.dllib.utils.intermediate.ConversionUtils
import org.apache.spark.rdd
import org.apache.spark.rdd.RDD
import scala.reflect.ClassTag
object Evaluator {
def apply[T: ClassTag](model: Module[T])(implicit ev: TensorNumeric[T]): Evaluator[T] = {
new Evaluator[T](model)
}
}
/**
* model evaluator
* @param model model to be evaluated
*/
class Evaluator[T: ClassTag] private[optim](model: Module[T])(implicit ev: TensorNumeric[T])
extends Serializable {
private val batchPerPartition = 4
/**
   * Applies the given validation methods to the model over an RDD of samples.
   * @param vMethods validation methods to apply
   * @param batchSize total batch size across all partitions
   * @return validation results paired with the methods that produced them
*/
def test(dataset: RDD[Sample[T]],
vMethods: Array[ValidationMethod[T]],
batchSize: Option[Int] = None): Array[(ValidationResult, ValidationMethod[T])] = {
val partitionNum = dataset.partitions.length
val totalBatch = batchSize.getOrElse(batchPerPartition * partitionNum)
val dummyInput = Predictor.getDummyData(dataset, totalBatch / partitionNum)
val modelBroad = ModelBroadcast[T]().broadcast(dataset.sparkContext,
ConversionUtils.convert(model.evaluate()), dummyInput)
val rdd = ConversionUtils.coalesce(dataset)
val otherBroad = rdd.sparkContext.broadcast(vMethods, SampleToMiniBatch(
batchSize = totalBatch, partitionNum = Some(rdd.partitions.length)))
rdd.mapPartitions(partition => {
val localModel = modelBroad.value(false, true, dummyInput)
val localMethod = otherBroad.value._1.map(_.clone())
val localTransformer = otherBroad.value._2.cloneTransformer()
val miniBatch = localTransformer(partition)
miniBatch.map(batch => {
val output = localModel.forward(batch.getInput())
localMethod.map(validation => {
validation(output, batch.getTarget())
})
})
}).reduce((left, right) => {
left.zip(right).map { case (l, r) => l + r }
}).zip(vMethods)
}
/**
   * Applies the given validation methods to the model over an RDD of MiniBatches.
   * @param vMethods validation methods to apply
   * @return validation results paired with the methods that produced them
*/
private[bigdl] def testMiniBatch(dataset: RDD[MiniBatch[T]],
vMethods: Array[ValidationMethod[T]]
): Array[(ValidationResult, ValidationMethod[T])] = {
val rdd = ConversionUtils.coalesce(dataset)
val modelBroad = ModelBroadcast[T]().broadcast(rdd.sparkContext,
ConversionUtils.convert(model.evaluate()))
val otherBroad = rdd.sparkContext.broadcast(vMethods)
rdd.mapPartitions(miniBatch => {
val localModel = modelBroad.value()
val localMethod = otherBroad.value
miniBatch.map(batch => {
val output = localModel.forward(batch.getInput())
localMethod.map(validation => {
validation(output, batch.getTarget())
})
})
}).reduce((left, right) => {
left.zip(right).map { case (l, r) => l + r }
}).zip(vMethods)
}
}
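// Usage sketch (illustrative only; assumes Top1Accuracy, this package's
// standard top-1 metric, and a model/sample RDD supplied by the caller):
private[optim] object EvaluatorUsageExample {
  def topOneAccuracy(model: Module[Float], samples: RDD[Sample[Float]]): Unit = {
    val results = Evaluator(model).test(
      samples, Array[ValidationMethod[Float]](new Top1Accuracy[Float]()), Some(128))
    results.foreach { case (result, method) => println(s"$method: $result") }
  }
}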
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/optim/Evaluator.scala | Scala | apache-2.0 | 3,891 |
package com.wavesplatform.matcher.integration
import com.wavesplatform.matcher.api.CancelOrderRequest
import com.wavesplatform.settings.Constants
import org.scalatest.concurrent.Eventually
import org.scalatest.time.SpanSugar._
import org.scalatest.{FunSuite, Matchers}
import play.api.libs.json.{JsArray, JsValue, Json}
import scorex.account.{Account, PrivateKeyAccount}
import scorex.crypto.encode.Base58
import scorex.transaction.AssetAcc
import scorex.transaction.assets.exchange.Order
import scorex.transaction.assets.exchange.OrderJson._
import scorex.transaction.state.wallet.{IssueRequest, TransferRequest}
import scorex.utils.NTP
/**
 * !!! These tests must run as the whole suite, in order; they cannot run
 * separately, because each test depends on state left by the previous one
*/
class MatcherAPISpecification extends FunSuite with Matchers with Eventually with scorex.waves.TestingCommons {
private val wallet = application.wallet
private val AccountM = wallet.privateKeyAccounts()(2)
private val AccountA = wallet.privateKeyAccounts().head
private var Asset1 = Option("")
private var MBalance = 0L
private var MBalance1 = 0L
private var ABalance = 0L
private var ABalance1 = 0L
private val TxFee = 100000L
private val storedState = application.storedState
private var orderIdToCancel = Option.empty[String]
private val MatcherPubKey = application.wallet.privateKeyAccount(application.settings.matcherAccount).
map(a => Base58.encode(a.publicKey)).get
def initBalances() = {
assetTransfer(AccountM, AccountA, 2000 * Constants.UnitsInWave)
Asset1 = Some(issueAsset(AccountM, 1000 * Constants.UnitsInWave))
MBalance = storedState.assetBalance(AssetAcc(AccountM, None))
MBalance1 = storedState.assetBalance(AssetAcc(AccountM, Asset1.flatMap(Base58.decode(_).toOption)))
ABalance = storedState.assetBalance(AssetAcc(AccountA, None))
ABalance1 = storedState.assetBalance(AssetAcc(AccountA, Asset1.flatMap(Base58.decode(_).toOption)))
}
def issueAsset(from: Account, amount: Long): String = {
val json =
s"""{
| "name": "string",
| "quantity": $amount,
| "description": "string",
| "sender": "${from.address}",
| "decimals": 8,
| "reissuable": true,
| "fee": 100000000
|}""".stripMargin
val req = Json.parse(json).validate[IssueRequest].get
val resp = postRequest(us = "/assets/issue", body = json)
    val id = (resp \ "id").as[String]
id should not be empty
waitForBalance(amount, from, Some(id))
id
}
def assetTransfer(from: Account, to: Account, amount: Long, assetId: Option[String] = None) = {
val json =
s"""
|{
| "recipient": "${to.address}",
| "amount": $amount,
| "attachment": "",
| "sender": "${from.address}",
| "fee": 100000
|}""".stripMargin
val req = Json.parse(json).validate[TransferRequest].get
val resp = postRequest(us = "/assets/transfer", body = json)
    (resp \ "id").as[String] should not be empty
waitForBalance(amount, to, None)
}
def waitForBalance(balance: Long, acc: Account, asset: Option[String] = None): Unit = {
val assetId = asset.flatMap(Base58.decode(_).toOption)
eventually(timeout(5.seconds), interval(500.millis)) {
storedState.assetBalance(
AssetAcc(acc, assetId)) should be(balance)
}
}
def placeOrder(acc: PrivateKeyAccount, spendAsset: Option[String], receiveAsset: Option[String],
price: Double, amount: Long, expectedStatus: String = "OrderAccepted"): Option[String] = {
val created = NTP.correctedTime()
val timeToLive = created + Order.MaxLiveTime - 1000
val pubKeyStr = Base58.encode(acc.publicKey)
val json =
s"""{
| "matcherFee": 100000,
| "price": ${(price * Order.PriceConstant).toLong},
| "spendAssetId": "${spendAsset.getOrElse("")}",
| "receiveAssetId": "${receiveAsset.getOrElse("")}",
| "amount": $amount,
| "timestamp": $created,
| "expiration": $timeToLive,
| "matcher": "$MatcherPubKey",
| "sender": "$pubKeyStr"
|}""".stripMargin
val order = Json.parse(json).validate[Order].get
val signed = Order.sign(order, acc)
val signedJson = signed.json
val resp = matcherPostRequest("/orders/place", body = signedJson.toString)
    (resp \ "status").as[String] shouldBe expectedStatus
    (resp \ "message" \ "id").toOption.map(_.as[String])
}
def getOrderBook(asset: Option[String]): JsValue = {
matcherGetRequest(s"/orderBook", params = Map("asset1" -> asset.get))
}
def getOrderStatus(asset: Option[String], id: String): JsValue = {
matcherGetRequest(s"/orders/status/$id", params = Map("asset1" -> asset.get))
}
def waitForOrderStatus(asset: Option[String], id: String, status: String) = {
eventually(timeout(5.seconds), interval(500.millis)) {
      (getOrderStatus(asset, id) \ "status").as[String] should be(status)
}
}
def cancelOrder(acc: PrivateKeyAccount, spendAsset: Option[String], receiveAsset: Option[String],
orderId: String, expectedStatus: String = "OrderCanceled"): Unit = {
val ts = NTP.correctedTime()
val pubKeyStr = Base58.encode(acc.publicKey)
val json =
s"""{
| "sender": "$pubKeyStr",
| "orderId": "$orderId",
| "signature": "signature"
|}""".stripMargin
val orderCancel = Json.parse(json).validate[CancelOrderRequest].get
val signed = CancelOrderRequest.sign(orderCancel, acc)
val signedJson = signed.json
val (a1, a2) = if (spendAsset.isDefined) (spendAsset.get, receiveAsset.getOrElse("")) else
(receiveAsset.get, spendAsset.getOrElse(""))
val resp = matcherPostRequest("/orders/cancel", body = signedJson.toString,
params = Map("asset1" -> a1, "asset2" -> a2))
    (resp \ "status").as[String] shouldBe expectedStatus
}
override def beforeAll(): Unit = {
super.beforeAll()
Thread.sleep(1000)
}
test("start") {
    // don't move this to `beforeAll`! if this fails, `afterAll` never runs, leaving ports open
initBalances()
Thread.sleep(1000)
}
test("/matcher/publicKey") {
val resp = matcherGetRequest("/publicKey")
resp.as[String] shouldBe MatcherPubKey
}
test("place sell order") {
orderIdToCancel = placeOrder(AccountM, Asset1, None, 2, 500 * Constants.UnitsInWave)
val ob = getOrderBook(Asset1)
    ((ob \ "asks")(0) \ "price").as[Long] shouldBe 2 * Order.PriceConstant
    ((ob \ "asks")(0) \ "amount").as[Long] shouldBe 500 * Constants.UnitsInWave
}
test("match with buy order") {
val id = placeOrder(AccountA, None, Asset1, 2, 200 * Constants.UnitsInWave)
val ob = getOrderBook(Asset1)
    ((ob \ "asks")(0) \ "amount").as[Long] shouldBe 300 * Constants.UnitsInWave
val executedFee = 100000L
MBalance += 200 * Constants.UnitsInWave + executedFee
waitForBalance(MBalance, AccountM, None)
ABalance -= 200 * Constants.UnitsInWave + executedFee
waitForBalance(ABalance, AccountA, None)
MBalance1 -= 100 * Constants.UnitsInWave
waitForBalance(MBalance1, AccountM, Asset1)
ABalance1 += 100 * Constants.UnitsInWave
waitForBalance(ABalance1, AccountA, Asset1)
    (getOrderStatus(Asset1, id.get) \ "status").as[String] should be("Filled")
}
test("submit more orders than available assets including open") {
waitForBalance(900 * Constants.UnitsInWave, AccountM, Asset1) // And 300 by 2 is open = 150
// Should be available Asset1 = 900 - 150 = 750*1.5 = 1125 WAVES
placeOrder(AccountM, Asset1, None, 1.5, 1126 * Constants.UnitsInWave, "OrderRejected")
placeOrder(AccountM, Asset1, None, 1.5, 1125 * Constants.UnitsInWave, "OrderAccepted")
val ob = getOrderBook(Asset1)
    ((ob \ "asks")(0) \ "price").as[Long] shouldBe (1.5 * Order.PriceConstant).toLong
    ((ob \ "asks")(0) \ "amount").as[Long] shouldBe 1125 * Constants.UnitsInWave
    ((ob \ "asks")(1) \ "price").as[Long] shouldBe (2 * Order.PriceConstant).toLong
    ((ob \ "asks")(1) \ "amount").as[Long] shouldBe 300 * Constants.UnitsInWave
}
test("buy order match several price levels") {
val id = placeOrder(AccountA, None, Asset1, 2.5, 1225 * Constants.UnitsInWave, "OrderAccepted")
waitForOrderStatus(Asset1, id.get, "Filled")
val executedFee = 100000L * 1125 / 1225 + 100000L * 100 / 1225
MBalance += 1225 * Constants.UnitsInWave + executedFee
waitForBalance(MBalance, AccountM, None)
ABalance -= 1225 * Constants.UnitsInWave + executedFee //shouldBe 574.99800001
waitForBalance(ABalance, AccountA, None)
val ob = getOrderBook(Asset1)
    ((ob \ "asks")(0) \ "price").as[Long] shouldBe (2 * Order.PriceConstant).toLong
    ((ob \ "asks")(0) \ "amount").as[Long] shouldBe 200 * Constants.UnitsInWave
val assetAmount = (1125 * Constants.UnitsInWave / 1.5).toLong + 100 * Constants.UnitsInWave / 2
MBalance1 -= assetAmount // shouldBe 100
waitForBalance(MBalance1, AccountM, Asset1)
ABalance1 += assetAmount // shouldBe 900
waitForBalance(ABalance1, AccountA, Asset1)
}
test("cancel order and resubmit a new one") {
cancelOrder(AccountM, Asset1, None, orderIdToCancel.get)
placeOrder(AccountM, Asset1, None, 5, 500 * Constants.UnitsInWave, "OrderAccepted")
}
test("buy order should execute all open orders and put remaining in OrderBook") {
waitForBalance(ABalance, AccountA, None)
placeOrder(AccountA, None, Asset1, 5.5, 550 * Constants.UnitsInWave, "OrderAccepted")
MBalance1 = 0
waitForBalance(MBalance1, AccountM, Asset1)
ABalance1 = 1000 * Constants.UnitsInWave
waitForBalance(ABalance1, AccountA, Asset1)
ABalance -= 500 * Constants.UnitsInWave + (TxFee * 500 / 550)
waitForBalance(ABalance, AccountA, None)
val ob = getOrderBook(Asset1)
    ((ob \ "bids")(0) \ "price").as[Long] shouldBe (5.5 * Order.PriceConstant).toLong
    ((ob \ "bids")(0) \ "amount").as[Long] shouldBe 50 * Constants.UnitsInWave
    (ob \ "asks").get.asInstanceOf[JsArray].value.size shouldBe 0
}
}
| B83YPoj/Waves | src/test/scala/com/wavesplatform/matcher/integration/MatcherAPISpecification.scala | Scala | apache-2.0 | 10,200 |
package org.firedancer3d.math
import Math._
case class Ray(origin: Vec3 = Vec3.zero, direction: Vec3 = Vec3.unitZ) {
def distanceSquared(point: Vec3) = {
val p1 = point - origin
val t0 = direction dot p1
val p2 = direction * t0 + origin
val p3 = point - p2
p3.lengthSquared
}
def distance(point: Vec3) = Math.sqrt(distanceSquared(point))
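  // Worked example (assuming a Vec3(x, y, z) factory): for the default ray
  // (origin = zero, direction = +Z), distance(Vec3(3, 4, 0)) projects the point
  // onto the Z axis (t0 = 0), leaving the offset (3, 4, 0), so the distance is 5.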
}
| cyberthinkers/FireDancer3D | firedancer3d_shared/src/main/scala/org/firedancer3d/scenegraph/geometry/Ray.scala | Scala | mit | 382 |
package com.gateway.server
import org.vertx.java.core.json.{JsonArray, JsonObject}
object QMongo {
def createUserQuery(passwordHash: String, email: String): JsonObject = {
new JsonObject().putString("collection", "users")
.putString("action", "find")
.putObject("matcher", new JsonObject()
.putString("query", " email $eq \\"" + email + "\\"" + " password $eq \\"" + passwordHash + "\\""));
}
def followedTeams(ids: java.util.Iterator[Object]): JsonObject = {
import scala.collection.JavaConversions.asScalaIterator
val scalaIds: Iterator[Object] = ids
    val resultsLine = scalaIds.map(id => "\"" + id.toString + "\"").mkString(",")
new JsonObject().putString("collection", "teams")
.putString("action", "find")
.putObject("matcher", new JsonObject()
.putString("query", " _id $in { " + resultsLine.toString() + " }"))
}
def recentResultByTeam(teamName: String): JsonObject = {
new JsonObject().putString("collection", "recent")
.putString("action", "find")
.putObject("matcher",
new JsonObject().putString("teamName", teamName));
}
def recentResultsById(recentIds: JsonArray): JsonObject = {
    val cleanLine = recentIds.toString.replaceAll("\\[\"|\\\"]", "")
    val resultLine = "\"" + cleanLine + "\""
    new JsonObject().putString("collection", "results").putString("action", "find")
      .putObject("matcher", new JsonObject().putString("query", "_id $in { " + resultLine + " }"))
}
}
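// Usage sketch (illustrative only; the persistor address and reply handler are
// assumptions): these JsonObjects are query commands for the Vert.x MongoDB
// persistor module, e.g.
// vertx.eventBus.send("mongo.persistor", QMongo.recentResultByTeam("Arsenal"), replyHandler)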
| xgrommx/splanet | src/main/java/com/gateway/server/QMongo.scala | Scala | apache-2.0 | 1,820 |
package utils
import base.TestBaseDefinition
import org.scalatest.WordSpec
import utils.PreferenceSort._
import scala.util.Random
class PreferenceSortSpec extends WordSpec with TestBaseDefinition {
val l = Vector(
prefs(1, 4),
prefs(2, 4, 5),
prefs(3),
prefs(4, 1, 2),
prefs(5),
prefs(6, 7),
prefs(7, 9, 10),
prefs(8, 9),
prefs(9, 8),
prefs(10, 7),
prefs(11, 14),
prefs(12, 11),
prefs(13),
prefs(14, 11),
prefs(15, 2),
prefs(16),
prefs(17),
prefs(18, 19, 20),
prefs(19, 20, 18),
prefs(20, 19, 18)
)
def prefs[A](a: A, prfs: A*) = (a, prfs.toSet)
def against[A](people: Vector[Pair[A]], groups: Vector[Vector[A]]): Double = {
val filtered = people filter (_._2.nonEmpty)
val sat = filtered.foldLeft(0) { (ls, n) =>
n match {
case (v, set) => groups find (_ contains v) match {
case Some(g) =>
if (set.isEmpty) ls
else set.foldLeft(ls) { (ns, f) =>
if (g contains f) ns + 1
else ns
}
case None => ls
}
}
}
sat.toDouble / filtered.size.toDouble
}
"Satisfaction percentage function" should {
"calculate the percentage properly" in {
val options = Vector(
prefs(1, 2),
prefs(2, 3),
prefs(3, 2),
prefs(4),
prefs(5, 4),
prefs(6, 7)
)
val groups = Vector(Vector(1, 2, 3), Vector(4, 5, 6))
val calcPerc = against(options, groups)
val truePerc = 4.0 / 5.0 //4 out of 5 are in a group with a friend
truePerc shouldBe calcPerc
}
}
"A group division sort" should {
"always have a lower bound of amortized satisfaction percentage of about 90%" in {
(1 to 1000) foreach { _ =>
val depth = 100
val percentage = (1 to depth).foldLeft(0.0) { (sat, _) =>
val shuffled = Random.shuffle(l)
val gSize = {
val rnd = Random.nextInt(7)
if (rnd < 5) 5
else rnd
}
val sortedGrouped = sort(shuffled).grouped(gSize).toVector
sat + against(l, sortedGrouped)
}
(percentage / depth) >= 0.9 shouldBe true
}
}
}
}
| THK-ADV/lwm-reloaded | test/utils/PreferenceSortSpec.scala | Scala | mit | 2,250 |
// code-examples/AppDesign/options-nulls/option-for-comp-v1-script.scala
case class User(userName: String, name: String, email: String, bio: String)
val newUserProfiles = List(
Map("userName" -> "twitspam", "name" -> "Twit Spam"),
Map("userName" -> "bucktrends", "name" -> "Buck Trends",
"email" -> "[email protected]", "bio" -> "World's greatest bloviator"),
Map("userName" -> "lonelygurl", "name" -> "Lonely Gurl",
"bio" -> "Obviously fake..."),
Map("userName" -> "deanwampler", "name" -> "Dean Wampler",
"email" -> "[email protected]", "bio" -> "Scala passionista"),
Map("userName" -> "al3x", "name" -> "Alex Payne",
"email" -> "[email protected]", "bio" -> "Twitter API genius"))
// Version #1
var validUsers = for {
user <- newUserProfiles
if (user.contains("userName") && user.contains("name") && // #1
user.contains("email") && user.contains("bio")) // #1
userName <- user get "userName"
name <- user get "name"
email <- user get "email"
bio <- user get "bio" }
yield User(userName, name, email, bio)
validUsers.foreach (user => println(user))
| XClouded/t4f-core | scala/src/tmp/AppDesign/options-nulls/option-for-comp-v1-script.scala | Scala | apache-2.0 | 1,136 |
import scala.annotation.tailrec
sealed trait Bowling {
def roll(pins: Int): Bowling
def score(): Either[Error, Int]
}
object Bowling {
def apply(): Bowling = new BowlingImpl(List.empty)
private class BowlingImpl(rolls: List[Int]) extends Bowling {
override def roll(pins: Int): Bowling = new BowlingImpl(rolls ++ List(pins))
override def score(): Either[Error, Int] =
if (rolls.isEmpty) {
Left(Error("Unstarted game cannot be scored"))
} else {
val fs = frames(rolls, 1)
if (fs.length != 10) {
Left(Error("Invalid number of frames - 10 expected"))
} else {
score(fs, 0)
}
}
@tailrec
private def score(frames: List[List[Int]], acc: Int): Either[Error, Int] =
frames match {
case x::xs => val frameScore = scoreFrame(x, xs, acc)
frameScore match {
case Right(sum) => score(xs, sum)
case error => error
}
case _ => Right(acc)
}
private def scoreFrame(frame: List[Int], remainingFrames: List[List[Int]], acc: Int): Either[Error, Int] = {
if (frame.exists(s => s < 0)) {
Left(Error("rolls can not score negative points"))
} else if (frame.exists(s => s > 10)) {
Left(Error("a roll can not score more than 10 points"))
} else if (remainingFrames.nonEmpty && frame.sum > 10) {
Left(Error("two rolls in a frame can not score more than 10 points"))
} else if (remainingFrames.isEmpty && !isValidFinalFrame(frame)) {
Left(Error("invalid final frame"))
} else {
val score = if (strike(frame)) {
acc + frame.sum + strikeBonus(remainingFrames)
} else if (spare(frame)) {
acc + frame.sum + spareBonus(remainingFrames)
} else {
acc + frame.sum
}
Right(score)
}
}
private def frames(rolls: List[Int], i: Int): List[List[Int]] = {
if (rolls.isEmpty) {
List.empty
}
else {
val throws = numThrows(rolls, i)
rolls.take(throws)::frames(rolls.drop(throws), i + 1)
}
}
private def numThrows(rolls: List[Int], frameNum: Int): Int = {
if (frameNum == 10) {
if (strike(rolls) || spare(rolls)) 3
else 2
} else if (strike(rolls)) {
1
} else {
2
}
}
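    // Worked example: rolls List(10, 3, 7, 4, 2) split into frames
    // List(10) :: List(3, 7) :: List(4, 2), because a strike frame (before the
    // tenth) consumes one throw while other frames consume two; the tenth frame
    // consumes three throws after a strike or spare.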
private def strike(rolls: List[Int]): Boolean =
rolls.headOption.getOrElse(0) == 10
private def spare(rolls: List[Int]): Boolean =
rolls.take(2).sum == 10
private def strikeBonus(frames: List[List[Int]]): Int =
frames.take(2).flatten.take(2).sum
private def spareBonus(frames: List[List[Int]]): Int =
frames match {
case x::xs => x.head
case _ => 0
}
private def isValidFinalFrame(rolls: List[Int]): Boolean = {
val isStrike = strike(rolls)
val isSpare = spare(rolls)
if (rolls.length == 2) {
!isStrike && !isSpare
} else if (rolls.length == 3) {
(isStrike || isSpare) &&
(if (isStrike) {
rolls(1) == 10 || (rolls(1) + rolls(2) <= 10)
} else {
isSpare || rolls(1) + rolls(2) <= 10
})
} else {
false
}
}
}
}
case class Error(errorText: String)
| exercism/xscala | exercises/practice/bowling/Example.scala | Scala | mit | 3,306 |
package com.eharmony.aloha.util
import org.junit.Assert._
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.BlockJUnit4ClassRunner
@RunWith(classOf[BlockJUnit4ClassRunner])
class SubSeqIteratorTest {
@Test def test1(): Unit = {
val v = SubSeqIterator(1 to 4, 2).toVector
assertEquals(6, v.size)
assertEquals(Set(Vector(1, 2), Vector(1, 3), Vector(2, 3), Vector(1, 4), Vector(2, 4), Vector(3, 4)), v.toSet)
}
}
| eHarmony/aloha | aloha-core/src/test/scala/com/eharmony/aloha/util/SubSeqIteratorTest.scala | Scala | mit | 473 |
/* Copyright 2009-2016 EPFL, Lausanne */
import leon.lang._
import leon.lang.xlang._
object Epsilon1 {
def rand2(x: Int): Int = epsilon((y: Int) => true)
//this should not hold
def property2(x: Int): Boolean = {
rand2(x) == rand2(x+1)
}.holds
}
| regb/leon | src/test/resources/regression/verification/xlang/invalid/Epsilon1.scala | Scala | gpl-3.0 | 263 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.expressions
import org.apache.spark.annotation.Stable
import org.apache.spark.sql.{Column, Row}
import org.apache.spark.sql.catalyst.expressions.aggregate.{AggregateExpression, Complete}
import org.apache.spark.sql.execution.aggregate.ScalaUDAF
import org.apache.spark.sql.types._
/**
* The base class for implementing user-defined aggregate functions (UDAF).
*
* @since 1.5.0
* @deprecated UserDefinedAggregateFunction is deprecated.
* Aggregator[IN, BUF, OUT] should now be registered as a UDF via the functions.udaf(agg) method.
*/
@Stable
@deprecated("Aggregator[IN, BUF, OUT] should now be registered as a UDF" +
" via the functions.udaf(agg) method.", "3.0.0")
abstract class UserDefinedAggregateFunction extends Serializable {
/**
* A `StructType` represents data types of input arguments of this aggregate function.
* For example, if a [[UserDefinedAggregateFunction]] expects two input arguments
   * with types `DoubleType` and `LongType`, the returned `StructType` will look like
*
* ```
* new StructType()
* .add("doubleInput", DoubleType)
* .add("longInput", LongType)
* ```
*
* The name of a field of this `StructType` is only used to identify the corresponding
* input argument. Users can choose names to identify the input arguments.
*
* @since 1.5.0
*/
def inputSchema: StructType
/**
* A `StructType` represents data types of values in the aggregation buffer.
* For example, if a [[UserDefinedAggregateFunction]]'s buffer has two values
   * (i.e. two intermediate values) with types `DoubleType` and `LongType`,
* the returned `StructType` will look like
*
* ```
* new StructType()
* .add("doubleInput", DoubleType)
* .add("longInput", LongType)
* ```
*
* The name of a field of this `StructType` is only used to identify the corresponding
   * buffer value. Users can choose names to identify the buffer values.
*
* @since 1.5.0
*/
def bufferSchema: StructType
/**
* The `DataType` of the returned value of this [[UserDefinedAggregateFunction]].
*
* @since 1.5.0
*/
def dataType: DataType
/**
   * Returns true iff this function is deterministic, i.e. given the same input
   * it always returns the same output.
*
* @since 1.5.0
*/
def deterministic: Boolean
/**
* Initializes the given aggregation buffer, i.e. the zero value of the aggregation buffer.
*
* The contract should be that applying the merge function on two initial buffers should just
* return the initial buffer itself, i.e.
* `merge(initialBuffer, initialBuffer)` should equal `initialBuffer`.
*
* @since 1.5.0
*/
def initialize(buffer: MutableAggregationBuffer): Unit
/**
* Updates the given aggregation buffer `buffer` with new input data from `input`.
*
* This is called once per input row.
*
* @since 1.5.0
*/
def update(buffer: MutableAggregationBuffer, input: Row): Unit
/**
* Merges two aggregation buffers and stores the updated buffer values back to `buffer1`.
*
* This is called when we merge two partially aggregated data together.
*
* @since 1.5.0
*/
def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit
/**
* Calculates the final result of this [[UserDefinedAggregateFunction]] based on the given
* aggregation buffer.
*
* @since 1.5.0
*/
def evaluate(buffer: Row): Any
/**
* Creates a `Column` for this UDAF using given `Column`s as input arguments.
*
* @since 1.5.0
*/
@scala.annotation.varargs
def apply(exprs: Column*): Column = {
val aggregateExpression =
AggregateExpression(
ScalaUDAF(exprs.map(_.expr), this),
Complete,
isDistinct = false)
Column(aggregateExpression)
}
/**
* Creates a `Column` for this UDAF using the distinct values of the given
* `Column`s as input arguments.
*
* @since 1.5.0
*/
@scala.annotation.varargs
def distinct(exprs: Column*): Column = {
val aggregateExpression =
AggregateExpression(
ScalaUDAF(exprs.map(_.expr), this),
Complete,
isDistinct = true)
Column(aggregateExpression)
}
}
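/**
 * A minimal sketch of a concrete implementation, shown for illustration only
 * (this class is an assumption, not part of Spark's shipped API): an
 * arithmetic mean over one `DoubleType` column, making the
 * initialize/update/merge contract concrete.
 */
private[expressions] class SimpleAverageExample extends UserDefinedAggregateFunction {
  override def inputSchema: StructType = new StructType().add("value", DoubleType)
  override def bufferSchema: StructType =
    new StructType().add("sum", DoubleType).add("count", LongType)
  override def dataType: DataType = DoubleType
  override def deterministic: Boolean = true
  override def initialize(buffer: MutableAggregationBuffer): Unit = {
    buffer(0) = 0.0 // running sum
    buffer(1) = 0L  // running count
  }
  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    if (!input.isNullAt(0)) {
      buffer(0) = buffer.getDouble(0) + input.getDouble(0)
      buffer(1) = buffer.getLong(1) + 1L
    }
  }
  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    buffer1(0) = buffer1.getDouble(0) + buffer2.getDouble(0)
    buffer1(1) = buffer1.getLong(1) + buffer2.getLong(1)
  }
  override def evaluate(buffer: Row): Any =
    if (buffer.getLong(1) == 0L) null else buffer.getDouble(0) / buffer.getLong(1)
}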
/**
* A `Row` representing a mutable aggregation buffer.
*
* This is not meant to be extended outside of Spark.
*
* @since 1.5.0
*/
@Stable
abstract class MutableAggregationBuffer extends Row {
/** Update the ith value of this buffer. */
def update(i: Int, value: Any): Unit
}
| maropu/spark | sql/core/src/main/scala/org/apache/spark/sql/expressions/udaf.scala | Scala | apache-2.0 | 5,321 |
/**
* Copyright 2014 Idio
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @author David Przybilla [email protected]
**/
package org.idio.dbpedia.spotlight.stores
import org.dbpedia.spotlight.db.memory.{ MemoryStore, MemorySurfaceFormStore }
import org.dbpedia.spotlight.exceptions.SurfaceFormNotFoundException
import java.io.{ File, FileInputStream }
class CustomSurfaceFormStore(val pathtoFolder: String) {
val sfMemFile = new FileInputStream(new File(pathtoFolder, "sf.mem"))
var sfStore: MemorySurfaceFormStore = MemoryStore.loadSurfaceFormStore(sfMemFile)
/*
* Updates the internal arrays for a new SurfaceForm
* */
private def addSF(surfaceText: String) {
println("\\t adding a new surface form..." + surfaceText)
this.sfStore.stringForID = this.sfStore.stringForID :+ surfaceText
// the counts for the new surface form is the avg of the counts for the other surface forms
this.sfStore.annotatedCountForID = this.sfStore.annotatedCountForID :+ 1
this.sfStore.totalCountForID = this.sfStore.totalCountForID :+ 1
}
/*
* Adds a list of surfaceforms directly to the low level maps.
   * Assumes that the surfaceForms in the list do not already exist in the low-level maps
* */
private def addListOfNewSurfaceForms(listOfNewSurfaceForms: List[String]): List[Int] = {
val indexFirstNewSf = this.sfStore.stringForID.length
val indexLastNewSf = (this.sfStore.stringForID.length + listOfNewSurfaceForms.size)
this.sfStore.stringForID = this.sfStore.stringForID ++ listOfNewSurfaceForms
val defaultValueList = List.fill(listOfNewSurfaceForms.size)(1)
this.sfStore.annotatedCountForID = this.sfStore.annotatedCountForID ++ defaultValueList
this.sfStore.totalCountForID = this.sfStore.totalCountForID ++ defaultValueList
return List.range(indexFirstNewSf, indexLastNewSf)
}
/*
* Adds a new surfaceForm to the surfaceFormStore.
   * It does NOT check whether the surface form already exists;
   * returns the id of the new SF
* */
def addSurfaceForm(surfaceText: String): Int = {
//adding the SF to the internal arrays
this.addSF(surfaceText)
// update internal indexes
println("\\t updating the SF index")
this.sfStore.createReverseLookup()
var surfaceForm = this.sfStore.getSurfaceForm(surfaceText)
return surfaceForm.id
}
/*
   * Adds a set of surfaceForms to the surfaceFormStore in a single batch,
* It only adds sf which are not already in the store.
* returns a list with the Ids of the added SurfaceForms
* */
def addSetOfSF(setOfSurfaceForms: scala.collection.Set[String]): List[Int] = {
// Searching SF in the main Store
val searchSurfaceFormResult = setOfSurfaceForms.toSeq.par.map(surfaceForm =>
try {
val sf = this.sfStore.getSurfaceForm(surfaceForm)
println("\\t found..\\t" + surfaceForm)
sf.id
} catch {
case e: SurfaceFormNotFoundException => {
surfaceForm
}
})
// Separating Existing SF from non existing
val (listOfNewSurfaceForms, listOfExistingSFIds) = searchSurfaceFormResult.par.partition(_.isInstanceOf[String])
// Adding the non-existent SF to the low level maps
val listOfNewSurfaceFormIds: List[Int] = addListOfNewSurfaceForms(listOfNewSurfaceForms.toList.asInstanceOf[List[String]])
// making all new SF spottable(updating Probabilities)
val allSFIds: List[Int] = listOfExistingSFIds.toList.asInstanceOf[List[Int]] ++ listOfNewSurfaceFormIds
allSFIds.foreach(
surfaceFormId => boostCountsIfNeeded(surfaceFormId))
// Rebuilding reverse lookups
println("\\t updating the SF index")
this.sfStore.createReverseLookup()
return listOfNewSurfaceFormIds
}
/*
* Raises the SF counts to pass the minimum threshold needed to be spottable
* makes the SF annotationProbability equals to 0.27, this is done by rising the annotatedCounts
* */
def boostCountsIfNeeded(surfaceFormID: Int) {
val annotationProbability = this.sfStore.annotatedCountForID(surfaceFormID) / this.sfStore.totalCountForID(surfaceFormID).toDouble
if (annotationProbability < 0.27) {
var newAnnotatedCount = (0.27 * this.sfStore.totalCountForID(surfaceFormID).toDouble).toInt + 1
this.sfStore.annotatedCountForID(surfaceFormID) = newAnnotatedCount
}
}
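  // Worked example: with totalCount = 100 and annotatedCount = 10, the
  // annotation probability is 0.10 < 0.27, so annotatedCount is raised to
  // (0.27 * 100).toInt + 1 = 28, giving a probability of 0.28, just above
  // the spotting threshold.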
/*
   * Raises the SF counts past the minimum threshold needed to be spottable:
   * sets the SF annotation probability to 0.27 by raising the annotated counts
* */
def boostCountsIfNeededByString(surfaceFormText: String) {
try {
val surfaceFormId = this.sfStore.getSurfaceForm(surfaceFormText).id
this.boostCountsIfNeeded(surfaceFormId)
} catch {
case e: SurfaceFormNotFoundException => {
println("given SF:" + surfaceFormText + " does not exist")
}
}
}
/*
   * Reduces the SF counts, making the surface form less likely to be spotted:
   * lowers the SF annotation probability to the given spotProbability by reducing the annotated counts
* */
def decreaseSpottingProbabilityById(surfaceFormID: Int, spotProbability: Double) {
val annotationProbability = this.sfStore.annotatedCountForID(surfaceFormID) / this.sfStore.totalCountForID(surfaceFormID).toDouble
if (this.sfStore.totalCountForID(surfaceFormID) < 2) {
this.sfStore.totalCountForID(surfaceFormID) = 10
}
if (annotationProbability > spotProbability) {
var newAnnotatedCount = (spotProbability * this.sfStore.totalCountForID(surfaceFormID).toDouble).toInt + 1
this.sfStore.annotatedCountForID(surfaceFormID) = newAnnotatedCount
}
}
/*
   * Reduces the SF counts, making the surface form less likely to be spotted:
   * lowers the SF annotation probability to the given spotProbability by reducing the annotated counts
* */
def decreaseSpottingProbabilityByString(surfaceText: String, spotProbability: Double) {
// looks for the id of the surfaceForm
try {
var surfaceForm = this.sfStore.getSurfaceForm(surfaceText)
this.decreaseSpottingProbabilityById(surfaceForm.id, spotProbability)
println("\\t the counts for:" + surfaceText + "," + surfaceForm.id + " has been reduced.")
} catch {
case e: SurfaceFormNotFoundException => println("\\tgiven surface form:" + surfaceText + " does not exist...")
}
}
/*
* Given a SurfaceForm if it exists returns its Id
* otherwise it creates it, rebuild the internal index, and return the SF ID
* */
def getAddSurfaceForm(surfaceText: String): Int = {
// look for existing surfaceForm
try {
var surfaceForm = this.sfStore.getSurfaceForm(surfaceText)
this.boostCountsIfNeeded(surfaceForm.id)
return surfaceForm.id
} catch {
case e: SurfaceFormNotFoundException => println("creating surface form...")
}
// create sf in case it cant be found
var surfaceFormId = this.addSurfaceForm(surfaceText)
this.boostCountsIfNeeded(surfaceFormId)
return surfaceFormId
}
}
| idio/spotlight-model-editor | src/main/scala/org/idio/dbpedia/spotlight/stores/CustomSurfaceFormStore.scala | Scala | apache-2.0 | 7,469 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.atomic
/** Atomic references wrapping `Char` values.
*
* Note that the equality test in `compareAndSet` is value based,
* since `Char` is a primitive.
*/
final class AtomicChar private[atomic]
(initialValue: Char) extends AtomicNumber[Char] {
private[this] var ref = initialValue
  private[this] val mask = 255 + 255 * 256 // 0xFFFF: constrains results to the 16-bit Char range
def getAndSet(update: Char): Char = {
val current = ref
ref = update
current
}
def compareAndSet(expect: Char, update: Char): Boolean = {
if (ref == expect) {
ref = update
true
}
else
false
}
def set(update: Char): Unit = {
ref = update
}
def get: Char = ref
def getAndSubtract(v: Char): Char = {
val c = ref
ref = ((ref - v) & mask).asInstanceOf[Char]
c
}
def subtractAndGet(v: Char): Char = {
ref = ((ref - v) & mask).asInstanceOf[Char]
ref
}
def subtract(v: Char): Unit = {
ref = ((ref - v) & mask).asInstanceOf[Char]
}
def getAndAdd(v: Char): Char = {
val c = ref
ref = ((ref + v) & mask).asInstanceOf[Char]
c
}
def getAndIncrement(v: Int = 1): Char = {
val c = ref
ref = ((ref + v) & mask).asInstanceOf[Char]
c
}
def addAndGet(v: Char): Char = {
ref = ((ref + v) & mask).asInstanceOf[Char]
ref
}
def incrementAndGet(v: Int = 1): Char = {
ref = ((ref + v) & mask).asInstanceOf[Char]
ref
}
def add(v: Char): Unit = {
ref = ((ref + v) & mask).asInstanceOf[Char]
}
def increment(v: Int = 1): Unit = {
ref = ((ref + v) & mask).asInstanceOf[Char]
}
def decrement(v: Int = 1): Unit = increment(-v)
def decrementAndGet(v: Int = 1): Char = incrementAndGet(-v)
def getAndDecrement(v: Int = 1): Char = getAndIncrement(-v)
}
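/** Usage sketch (illustrative only, not part of the library): mutations wrap
  * around modulo 2^16 because every update is masked to the Char range.
  */
private[atomic] object AtomicCharExample {
  def wrapAround(): Char = {
    val ref = AtomicChar('\uFFFF') // maximum Char value
    ref.incrementAndGet() // wraps to '\u0000' via the 16-bit mask
  }
}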
/** @define createDesc Constructs an [[AtomicChar]] reference, allowing
* for fine-tuning of the created instance.
*
* A [[PaddingStrategy]] can be provided in order to counter
* the "false sharing" problem.
*
* Note that for ''Scala.js'' we aren't applying any padding,
* as it doesn't make much sense, since Javascript execution
* is single threaded, but this builder is provided for
* syntax compatibility anyway across the JVM and Javascript
* and we never know how Javascript engines will evolve.
*/
object AtomicChar {
/** Builds an [[AtomicChar]] reference.
*
* @param initialValue is the initial value with which to initialize the atomic
*/
def apply(initialValue: Char): AtomicChar =
new AtomicChar(initialValue)
/** $createDesc
*
* @param initialValue is the initial value with which to initialize the atomic
* @param padding is the [[PaddingStrategy]] to apply
*/
def withPadding(initialValue: Char, padding: PaddingStrategy): AtomicChar =
new AtomicChar(initialValue)
/** $createDesc
*
   * On top of Java 8, this builder also allows turning off the
* Java 8 intrinsics, thus forcing usage of CAS-loops for
* `getAndSet` and for `getAndAdd`.
*
* @param initialValue is the initial value with which to initialize the atomic
* @param padding is the [[PaddingStrategy]] to apply
* @param allowPlatformIntrinsics is a boolean parameter that specifies whether
* the instance is allowed to use the Java 8 optimized operations
* for `getAndSet` and for `getAndAdd`
*/
def create(initialValue: Char, padding: PaddingStrategy, allowPlatformIntrinsics: Boolean): AtomicChar =
new AtomicChar(initialValue)
/** $createDesc
*
* This builder guarantees to construct a safe atomic reference that
   * does not make use of `sun.misc.Unsafe`. On platforms that
   * don't support it, notably some versions of Android or
   * the upcoming Java 9, this might be desirable.
*
* NOTE that explicit usage of this builder is not usually necessary
* because [[create]] can auto-detect whether the underlying platform
* supports `sun.misc.Unsafe` and if it does, then its usage is
* recommended, because the "safe" atomic instances have overhead.
*
* @param initialValue is the initial value with which to initialize the atomic
* @param padding is the [[PaddingStrategy]] to apply
*/
def safe(initialValue: Char, padding: PaddingStrategy): AtomicChar =
new AtomicChar(initialValue)
} | ddworak/monix | monix-execution/js/src/main/scala/monix/execution/atomic/AtomicChar.scala | Scala | apache-2.0 | 5,086 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.net.{URI, URISyntaxException}
import java.text.{BreakIterator, DecimalFormat, DecimalFormatSymbols}
import java.util.{HashMap, Locale, Map => JMap}
import java.util.regex.Pattern
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.util.{ArrayData, GenericArrayData, TypeUtils}
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.{ByteArray, UTF8String}
////////////////////////////////////////////////////////////////////////////////////////////////////
// This file defines expressions for string operations.
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* An expression that concatenates multiple inputs into a single output.
* If all inputs are binary, concat returns an output as binary. Otherwise, it returns as string.
* If any input is null, concat returns null.
*/
@ExpressionDescription(
usage = "_FUNC_(str1, str2, ..., strN) - Returns the concatenation of str1, str2, ..., strN.",
examples = """
Examples:
> SELECT _FUNC_('Spark', 'SQL');
SparkSQL
""")
case class Concat(children: Seq[Expression]) extends Expression {
private lazy val isBinaryMode: Boolean = dataType == BinaryType
override def checkInputDataTypes(): TypeCheckResult = {
if (children.isEmpty) {
TypeCheckResult.TypeCheckSuccess
} else {
val childTypes = children.map(_.dataType)
if (childTypes.exists(tpe => !Seq(StringType, BinaryType).contains(tpe))) {
return TypeCheckResult.TypeCheckFailure(
s"input to function $prettyName should have StringType or BinaryType, but it's " +
childTypes.map(_.simpleString).mkString("[", ", ", "]"))
}
TypeUtils.checkForSameTypeInputExpr(childTypes, s"function $prettyName")
}
}
override def dataType: DataType = children.map(_.dataType).headOption.getOrElse(StringType)
override def nullable: Boolean = children.exists(_.nullable)
override def foldable: Boolean = children.forall(_.foldable)
override def eval(input: InternalRow): Any = {
if (isBinaryMode) {
val inputs = children.map(_.eval(input).asInstanceOf[Array[Byte]])
ByteArray.concat(inputs: _*)
} else {
val inputs = children.map(_.eval(input).asInstanceOf[UTF8String])
UTF8String.concat(inputs : _*)
}
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val evals = children.map(_.genCode(ctx))
val args = ctx.freshName("args")
val inputs = evals.zipWithIndex.map { case (eval, index) =>
s"""
${eval.code}
if (!${eval.isNull}) {
$args[$index] = ${eval.value};
}
"""
}
val (concatenator, initCode) = if (isBinaryMode) {
(classOf[ByteArray].getName, s"byte[][] $args = new byte[${evals.length}][];")
} else {
("UTF8String", s"UTF8String[] $args = new UTF8String[${evals.length}];")
}
val codes = ctx.splitExpressionsWithCurrentInputs(
expressions = inputs,
funcName = "valueConcat",
extraArguments = (s"${CodeGenerator.javaType(dataType)}[]", args) :: Nil)
ev.copy(s"""
$initCode
$codes
${CodeGenerator.javaType(dataType)} ${ev.value} = $concatenator.concat($args);
boolean ${ev.isNull} = ${ev.value} == null;
""")
}
override def toString: String = s"concat(${children.mkString(", ")})"
override def sql: String = s"concat(${children.map(_.sql).mkString(", ")})"
}
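// Evaluation sketch (illustrative only): with literal children, e.g.
// Concat(Literal("Spark") :: Literal("SQL") :: Nil).eval(null) yields
// UTF8String.fromString("SparkSQL"); if any child evaluates to null, the
// result is null.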
/**
* An expression that concatenates multiple input strings or array of strings into a single string,
* using a given separator (the first child).
*
* Returns null if the separator is null. Otherwise, concat_ws skips all null values.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(sep, [str | array(str)]+) - Returns the concatenation of the strings separated by `sep`.",
examples = """
Examples:
> SELECT _FUNC_(' ', 'Spark', 'SQL');
Spark SQL
""")
// scalastyle:on line.size.limit
case class ConcatWs(children: Seq[Expression])
extends Expression with ImplicitCastInputTypes {
require(children.nonEmpty, s"$prettyName requires at least one argument.")
override def prettyName: String = "concat_ws"
/** The 1st child (separator) is str, and rest are either str or array of str. */
override def inputTypes: Seq[AbstractDataType] = {
val arrayOrStr = TypeCollection(ArrayType(StringType), StringType)
StringType +: Seq.fill(children.size - 1)(arrayOrStr)
}
override def dataType: DataType = StringType
override def nullable: Boolean = children.head.nullable
override def foldable: Boolean = children.forall(_.foldable)
override def eval(input: InternalRow): Any = {
val flatInputs = children.flatMap { child =>
child.eval(input) match {
case s: UTF8String => Iterator(s)
case arr: ArrayData => arr.toArray[UTF8String](StringType)
case null => Iterator(null.asInstanceOf[UTF8String])
}
}
UTF8String.concatWs(flatInputs.head, flatInputs.tail : _*)
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
if (children.forall(_.dataType == StringType)) {
// All children are strings. In that case we can construct a fixed size array.
val evals = children.map(_.genCode(ctx))
val separator = evals.head
val strings = evals.tail
val numArgs = strings.length
val args = ctx.freshName("args")
val inputs = strings.zipWithIndex.map { case (eval, index) =>
if (eval.isNull != "true") {
s"""
${eval.code}
if (!${eval.isNull}) {
$args[$index] = ${eval.value};
}
"""
} else {
""
}
}
val codes = ctx.splitExpressionsWithCurrentInputs(
expressions = inputs,
funcName = "valueConcatWs",
extraArguments = ("UTF8String[]", args) :: Nil)
ev.copy(s"""
UTF8String[] $args = new UTF8String[$numArgs];
${separator.code}
$codes
UTF8String ${ev.value} = UTF8String.concatWs(${separator.value}, $args);
boolean ${ev.isNull} = ${ev.value} == null;
""")
} else {
val array = ctx.freshName("array")
val varargNum = ctx.freshName("varargNum")
val idxVararg = ctx.freshName("idxInVararg")
val evals = children.map(_.genCode(ctx))
val (varargCount, varargBuild) = children.tail.zip(evals.tail).map { case (child, eval) =>
child.dataType match {
case StringType =>
("", // we count all the StringType arguments num at once below.
if (eval.isNull == "true") {
""
} else {
s"$array[$idxVararg ++] = ${eval.isNull} ? (UTF8String) null : ${eval.value};"
})
case _: ArrayType =>
val size = ctx.freshName("n")
if (eval.isNull == "true") {
("", "")
} else {
(s"""
if (!${eval.isNull}) {
$varargNum += ${eval.value}.numElements();
}
""",
s"""
if (!${eval.isNull}) {
final int $size = ${eval.value}.numElements();
for (int j = 0; j < $size; j ++) {
$array[$idxVararg ++] = ${CodeGenerator.getValue(eval.value, StringType, "j")};
}
}
""")
}
}
}.unzip
val codes = ctx.splitExpressionsWithCurrentInputs(evals.map(_.code))
val varargCounts = ctx.splitExpressionsWithCurrentInputs(
expressions = varargCount,
funcName = "varargCountsConcatWs",
returnType = "int",
makeSplitFunction = body =>
s"""
|int $varargNum = 0;
|$body
|return $varargNum;
""".stripMargin,
foldFunctions = _.map(funcCall => s"$varargNum += $funcCall;").mkString("\n"))
val varargBuilds = ctx.splitExpressionsWithCurrentInputs(
expressions = varargBuild,
funcName = "varargBuildsConcatWs",
extraArguments = ("UTF8String []", array) :: ("int", idxVararg) :: Nil,
returnType = "int",
makeSplitFunction = body =>
s"""
|$body
|return $idxVararg;
""".stripMargin,
foldFunctions = _.map(funcCall => s"$idxVararg = $funcCall;").mkString("\n"))
ev.copy(
s"""
$codes
int $varargNum = ${children.count(_.dataType == StringType) - 1};
int $idxVararg = 0;
$varargCounts
UTF8String[] $array = new UTF8String[$varargNum];
$varargBuilds
UTF8String ${ev.value} = UTF8String.concatWs(${evals.head.value}, $array);
boolean ${ev.isNull} = ${ev.value} == null;
""")
}
}
}
/**
* An expression that returns the `n`-th input in given inputs.
* If all inputs are binary, `elt` returns an output as binary. Otherwise, it returns as string.
* If any input is null, `elt` returns null.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(n, input1, input2, ...) - Returns the `n`-th input, e.g., returns `input2` when `n` is 2.",
examples = """
Examples:
> SELECT _FUNC_(1, 'scala', 'java');
scala
""")
// scalastyle:on line.size.limit
case class Elt(children: Seq[Expression]) extends Expression {
private lazy val indexExpr = children.head
private lazy val inputExprs = children.tail.toArray
/** This expression is always nullable because it returns null if index is out of range. */
override def nullable: Boolean = true
override def dataType: DataType = inputExprs.map(_.dataType).headOption.getOrElse(StringType)
override def checkInputDataTypes(): TypeCheckResult = {
if (children.size < 2) {
TypeCheckResult.TypeCheckFailure("elt function requires at least two arguments")
} else {
val (indexType, inputTypes) = (indexExpr.dataType, inputExprs.map(_.dataType))
if (indexType != IntegerType) {
return TypeCheckResult.TypeCheckFailure(s"first input to function $prettyName should " +
s"have IntegerType, but it's $indexType")
}
if (inputTypes.exists(tpe => !Seq(StringType, BinaryType).contains(tpe))) {
return TypeCheckResult.TypeCheckFailure(
s"input to function $prettyName should have StringType or BinaryType, but it's " +
inputTypes.map(_.simpleString).mkString("[", ", ", "]"))
}
TypeUtils.checkForSameTypeInputExpr(inputTypes, s"function $prettyName")
}
}
override def eval(input: InternalRow): Any = {
val indexObj = indexExpr.eval(input)
if (indexObj == null) {
null
} else {
val index = indexObj.asInstanceOf[Int]
if (index <= 0 || index > inputExprs.length) {
null
} else {
inputExprs(index - 1).eval(input)
}
}
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val index = indexExpr.genCode(ctx)
val inputs = inputExprs.map(_.genCode(ctx))
val indexVal = ctx.freshName("index")
val indexMatched = ctx.freshName("eltIndexMatched")
val inputVal = ctx.addMutableState(CodeGenerator.javaType(dataType), "inputVal")
val assignInputValue = inputs.zipWithIndex.map { case (eval, index) =>
s"""
|if ($indexVal == ${index + 1}) {
| ${eval.code}
| $inputVal = ${eval.isNull} ? null : ${eval.value};
| $indexMatched = true;
| continue;
|}
""".stripMargin
}
val codes = ctx.splitExpressionsWithCurrentInputs(
expressions = assignInputValue,
funcName = "eltFunc",
extraArguments = ("int", indexVal) :: Nil,
returnType = CodeGenerator.JAVA_BOOLEAN,
makeSplitFunction = body =>
s"""
|${CodeGenerator.JAVA_BOOLEAN} $indexMatched = false;
|do {
| $body
|} while (false);
|return $indexMatched;
""".stripMargin,
foldFunctions = _.map { funcCall =>
s"""
|$indexMatched = $funcCall;
|if ($indexMatched) {
| continue;
|}
""".stripMargin
}.mkString)
ev.copy(
s"""
|${index.code}
|final int $indexVal = ${index.value};
|${CodeGenerator.JAVA_BOOLEAN} $indexMatched = false;
|$inputVal = null;
|do {
| $codes
|} while (false);
|final ${CodeGenerator.javaType(dataType)} ${ev.value} = $inputVal;
|final boolean ${ev.isNull} = ${ev.value} == null;
""".stripMargin)
}
}
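// Illustrative sketch (not part of Spark's source): elt uses a 1-based index and yields null
// for out-of-range indices, mirroring eval() above. The inputs are hypothetical.
object EltSemanticsDemo {
  def main(args: Array[String]): Unit = {
    val inputs = Array("scala", "java")
    def elt(n: Int): Any = if (n <= 0 || n > inputs.length) null else inputs(n - 1)
    println(elt(1)) // scala
    println(elt(3)) // null
  }
}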
trait String2StringExpression extends ImplicitCastInputTypes {
self: UnaryExpression =>
def convert(v: UTF8String): UTF8String
override def dataType: DataType = StringType
override def inputTypes: Seq[DataType] = Seq(StringType)
protected override def nullSafeEval(input: Any): Any =
convert(input.asInstanceOf[UTF8String])
}
/**
* A function that converts the characters of a string to uppercase.
*/
@ExpressionDescription(
usage = "_FUNC_(str) - Returns `str` with all characters changed to uppercase.",
examples = """
Examples:
> SELECT _FUNC_('SparkSql');
SPARKSQL
""")
case class Upper(child: Expression)
extends UnaryExpression with String2StringExpression {
override def convert(v: UTF8String): UTF8String = v.toUpperCase
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, c => s"($c).toUpperCase()")
}
}
/**
* A function that converts the characters of a string to lowercase.
*/
@ExpressionDescription(
usage = "_FUNC_(str) - Returns `str` with all characters changed to lowercase.",
examples = """
Examples:
> SELECT _FUNC_('SparkSql');
sparksql
""")
case class Lower(child: Expression) extends UnaryExpression with String2StringExpression {
override def convert(v: UTF8String): UTF8String = v.toLowerCase
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, c => s"($c).toLowerCase()")
}
}
/** A base trait for functions that compare two strings, returning a boolean. */
abstract class StringPredicate extends BinaryExpression
with Predicate with ImplicitCastInputTypes with NullIntolerant {
def compare(l: UTF8String, r: UTF8String): Boolean
override def inputTypes: Seq[DataType] = Seq(StringType, StringType)
protected override def nullSafeEval(input1: Any, input2: Any): Any =
compare(input1.asInstanceOf[UTF8String], input2.asInstanceOf[UTF8String])
override def toString: String = s"$nodeName($left, $right)"
}
/**
* A function that returns true if the string `left` contains the string `right`.
*/
case class Contains(left: Expression, right: Expression) extends StringPredicate {
override def compare(l: UTF8String, r: UTF8String): Boolean = l.contains(r)
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, (c1, c2) => s"($c1).contains($c2)")
}
}
/**
* A function that returns true if the string `left` starts with the string `right`.
*/
case class StartsWith(left: Expression, right: Expression) extends StringPredicate {
override def compare(l: UTF8String, r: UTF8String): Boolean = l.startsWith(r)
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, (c1, c2) => s"($c1).startsWith($c2)")
}
}
/**
* A function that returns true if the string `left` ends with the string `right`.
*/
case class EndsWith(left: Expression, right: Expression) extends StringPredicate {
override def compare(l: UTF8String, r: UTF8String): Boolean = l.endsWith(r)
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, (c1, c2) => s"($c1).endsWith($c2)")
}
}
/**
* Replace all occurrences with string.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(str, search[, replace]) - Replaces all occurrences of `search` with `replace`.",
arguments = """
Arguments:
* str - a string expression
* search - a string expression. If `search` is not found in `str`, `str` is returned unchanged.
* replace - a string expression. If `replace` is not specified or is an empty string, nothing replaces
the string that is removed from `str`.
""",
examples = """
Examples:
> SELECT _FUNC_('ABCabc', 'abc', 'DEF');
ABCDEF
""")
// scalastyle:on line.size.limit
case class StringReplace(srcExpr: Expression, searchExpr: Expression, replaceExpr: Expression)
extends TernaryExpression with ImplicitCastInputTypes {
def this(srcExpr: Expression, searchExpr: Expression) = {
this(srcExpr, searchExpr, Literal(""))
}
override def nullSafeEval(srcEval: Any, searchEval: Any, replaceEval: Any): Any = {
srcEval.asInstanceOf[UTF8String].replace(
searchEval.asInstanceOf[UTF8String], replaceEval.asInstanceOf[UTF8String])
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, (src, search, replace) => {
s"""${ev.value} = $src.replace($search, $replace);"""
})
}
override def dataType: DataType = StringType
override def inputTypes: Seq[DataType] = Seq(StringType, StringType, StringType)
override def children: Seq[Expression] = srcExpr :: searchExpr :: replaceExpr :: Nil
override def prettyName: String = "replace"
}
object StringTranslate {
def buildDict(matchingString: UTF8String, replaceString: UTF8String)
: JMap[Character, Character] = {
val matching = matchingString.toString()
val replace = replaceString.toString()
val dict = new HashMap[Character, Character]()
var i = 0
while (i < matching.length()) {
val rep = if (i < replace.length()) replace.charAt(i) else '\u0000'
if (null == dict.get(matching.charAt(i))) {
dict.put(matching.charAt(i), rep)
}
i += 1
}
dict
}
}
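// Illustrative sketch (not part of Spark's source): when the replace string is shorter than
// the matching string, buildDict maps the extra characters to the NUL character (U+0000),
// which translate() is assumed to treat as deletion. The strings below are hypothetical.
object BuildDictDemo {
  def main(args: Array[String]): Unit = {
    val dict = StringTranslate.buildDict(
      UTF8String.fromString("abc"), UTF8String.fromString("12"))
    println(dict.get('a')) // 1
    println(dict.get('c') == '\u0000') // true -- 'c' will be removed by translate()
  }
}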
/**
 * A function that translates any character in `srcExpr` to a character in `replaceExpr`.
 * The characters in `replaceExpr` correspond positionally to the characters in `matchingExpr`.
 * The translation happens whenever a character in the input string matches a character in
 * `matchingExpr`.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(input, from, to) - Translates the `input` string by replacing the characters present in the `from` string with the corresponding characters in the `to` string.",
examples = """
Examples:
> SELECT _FUNC_('AaBbCc', 'abc', '123');
A1B2C3
""")
// scalastyle:on line.size.limit
case class StringTranslate(srcExpr: Expression, matchingExpr: Expression, replaceExpr: Expression)
extends TernaryExpression with ImplicitCastInputTypes {
@transient private var lastMatching: UTF8String = _
@transient private var lastReplace: UTF8String = _
@transient private var dict: JMap[Character, Character] = _
override def nullSafeEval(srcEval: Any, matchingEval: Any, replaceEval: Any): Any = {
if (matchingEval != lastMatching || replaceEval != lastReplace) {
lastMatching = matchingEval.asInstanceOf[UTF8String].clone()
lastReplace = replaceEval.asInstanceOf[UTF8String].clone()
dict = StringTranslate.buildDict(lastMatching, lastReplace)
}
srcEval.asInstanceOf[UTF8String].translate(dict)
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val classNameDict = classOf[JMap[Character, Character]].getCanonicalName
val termLastMatching = ctx.addMutableState("UTF8String", "lastMatching")
val termLastReplace = ctx.addMutableState("UTF8String", "lastReplace")
val termDict = ctx.addMutableState(classNameDict, "dict")
nullSafeCodeGen(ctx, ev, (src, matching, replace) => {
val check = if (matchingExpr.foldable && replaceExpr.foldable) {
s"$termDict == null"
} else {
s"!$matching.equals($termLastMatching) || !$replace.equals($termLastReplace)"
}
s"""if ($check) {
        // Not all of them are literals, or the matching/replace value has changed
$termLastMatching = $matching.clone();
$termLastReplace = $replace.clone();
$termDict = org.apache.spark.sql.catalyst.expressions.StringTranslate
.buildDict($termLastMatching, $termLastReplace);
}
${ev.value} = $src.translate($termDict);
"""
})
}
override def dataType: DataType = StringType
override def inputTypes: Seq[DataType] = Seq(StringType, StringType, StringType)
override def children: Seq[Expression] = srcExpr :: matchingExpr :: replaceExpr :: Nil
override def prettyName: String = "translate"
}
/**
 * A function that returns the index (1-based) of the given string (left) in the comma-
 * delimited list (right). Returns 0 if the string was not found, or if the given
 * string (left) contains a comma.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = """
_FUNC_(str, str_array) - Returns the index (1-based) of the given string (`str`) in the comma-delimited list (`str_array`).
Returns 0, if the string was not found or if the given string (`str`) contains a comma.
""",
examples = """
Examples:
> SELECT _FUNC_('ab','abc,b,ab,c,def');
3
""")
// scalastyle:on line.size.limit
case class FindInSet(left: Expression, right: Expression) extends BinaryExpression
with ImplicitCastInputTypes {
override def inputTypes: Seq[AbstractDataType] = Seq(StringType, StringType)
override protected def nullSafeEval(word: Any, set: Any): Any =
set.asInstanceOf[UTF8String].findInSet(word.asInstanceOf[UTF8String])
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, (word, set) =>
s"${ev.value} = $set.findInSet($word);"
)
}
override def dataType: DataType = IntegerType
override def prettyName: String = "find_in_set"
}
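// Illustrative sketch (not part of Spark's source): find_in_set is 1-based and returns 0 when
// the search string is absent or itself contains a comma. The values are hypothetical.
object FindInSetDemo {
  def main(args: Array[String]): Unit = {
    val set = UTF8String.fromString("abc,b,ab,c,def")
    println(set.findInSet(UTF8String.fromString("ab")))  // 3
    println(set.findInSet(UTF8String.fromString("a,b"))) // 0 -- contains a comma
  }
}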
trait String2TrimExpression extends Expression with ImplicitCastInputTypes {
override def dataType: DataType = StringType
override def inputTypes: Seq[AbstractDataType] = Seq.fill(children.size)(StringType)
override def nullable: Boolean = children.exists(_.nullable)
override def foldable: Boolean = children.forall(_.foldable)
}
object StringTrim {
def apply(str: Expression, trimStr: Expression) : StringTrim = StringTrim(str, Some(trimStr))
def apply(str: Expression) : StringTrim = StringTrim(str, None)
}
/**
 * A function that takes a character string, removes the leading and trailing characters that
 * match any character in the trim string, and returns the new string.
 * If the BOTH and trimStr keywords are not specified, it defaults to removing the space
 * character from both ends. The trim function then has one argument, which contains the
 * source string.
 * If the BOTH and trimStr keywords are specified, it trims the characters from both ends, and
 * the trim function has two arguments: the first argument contains trimStr, the second
 * argument contains the source string.
 * trimStr: a character string to be trimmed from the source string. If it has multiple
 * characters, the function searches for each character in the source string and removes
 * characters from the source string until it encounters the first non-matching character.
 * BOTH: removes any character from both ends of the source string that matches characters in
 * the trim string.
*/
@ExpressionDescription(
usage = """
_FUNC_(str) - Removes the leading and trailing space characters from `str`.
_FUNC_(BOTH trimStr FROM str) - Remove the leading and trailing `trimStr` characters from `str`
_FUNC_(LEADING trimStr FROM str) - Remove the leading `trimStr` characters from `str`
_FUNC_(TRAILING trimStr FROM str) - Remove the trailing `trimStr` characters from `str`
""",
arguments = """
Arguments:
* str - a string expression
* trimStr - the trim string characters to trim, the default value is a single space
* BOTH, FROM - these are keywords to specify trimming string characters from both ends of
the string
* LEADING, FROM - these are keywords to specify trimming string characters from the left
end of the string
* TRAILING, FROM - these are keywords to specify trimming string characters from the right
end of the string
""",
examples = """
Examples:
> SELECT _FUNC_(' SparkSQL ');
SparkSQL
> SELECT _FUNC_('SL', 'SSparkSQLS');
parkSQ
> SELECT _FUNC_(BOTH 'SL' FROM 'SSparkSQLS');
parkSQ
> SELECT _FUNC_(LEADING 'SL' FROM 'SSparkSQLS');
parkSQLS
> SELECT _FUNC_(TRAILING 'SL' FROM 'SSparkSQLS');
SSparkSQ
""")
case class StringTrim(
srcStr: Expression,
trimStr: Option[Expression] = None)
extends String2TrimExpression {
def this(trimStr: Expression, srcStr: Expression) = this(srcStr, Option(trimStr))
def this(srcStr: Expression) = this(srcStr, None)
override def prettyName: String = "trim"
override def children: Seq[Expression] = if (trimStr.isDefined) {
srcStr :: trimStr.get :: Nil
} else {
srcStr :: Nil
}
override def eval(input: InternalRow): Any = {
val srcString = srcStr.eval(input).asInstanceOf[UTF8String]
if (srcString == null) {
null
} else {
if (trimStr.isDefined) {
srcString.trim(trimStr.get.eval(input).asInstanceOf[UTF8String])
} else {
srcString.trim()
}
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val evals = children.map(_.genCode(ctx))
val srcString = evals(0)
if (evals.length == 1) {
ev.copy(evals.map(_.code).mkString + s"""
boolean ${ev.isNull} = false;
UTF8String ${ev.value} = null;
if (${srcString.isNull}) {
${ev.isNull} = true;
} else {
${ev.value} = ${srcString.value}.trim();
}""")
} else {
val trimString = evals(1)
val getTrimFunction =
s"""
if (${trimString.isNull}) {
${ev.isNull} = true;
} else {
${ev.value} = ${srcString.value}.trim(${trimString.value});
}"""
ev.copy(evals.map(_.code).mkString + s"""
boolean ${ev.isNull} = false;
UTF8String ${ev.value} = null;
if (${srcString.isNull}) {
${ev.isNull} = true;
} else {
$getTrimFunction
}""")
}
}
}
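// Illustrative sketch (not part of Spark's source): with a multi-character trimStr, trim()
// removes characters from either end until the first character not contained in the trim
// set, matching the semantics documented above. The inputs are hypothetical.
object TrimSemanticsDemo {
  def main(args: Array[String]): Unit = {
    println(UTF8String.fromString("SSparkSQLS").trim(UTF8String.fromString("SL"))) // parkSQ
    println(UTF8String.fromString("  SparkSQL ").trim()) // SparkSQL
  }
}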
object StringTrimLeft {
def apply(str: Expression, trimStr: Expression): StringTrimLeft =
StringTrimLeft(str, Some(trimStr))
def apply(str: Expression): StringTrimLeft = StringTrimLeft(str, None)
}
/**
 * A function that trims the characters from the left end of a given string.
 * If the LEADING and trimStr keywords are not specified, it defaults to removing the space
 * character from the left end. The ltrim function then has one argument, which contains the
 * source string.
 * If the LEADING and trimStr keywords are specified, it trims the characters from the left
 * end. The ltrim function has two arguments: the first argument contains trimStr, the second
 * argument contains the source string.
 * trimStr: the function removes from the left end of the source string any character that
 * matches a character in trimStr; it stops at the first non-matching character.
 * LEADING: removes any character from the left end of the source string that matches
 * characters in the trim string.
*/
@ExpressionDescription(
usage = """
_FUNC_(str) - Removes the leading space characters from `str`.
    _FUNC_(trimStr, str) - Removes the leading string which contains the characters from the trim string
""",
arguments = """
Arguments:
* str - a string expression
* trimStr - the trim string characters to trim, the default value is a single space
""",
examples = """
Examples:
> SELECT _FUNC_(' SparkSQL ');
SparkSQL
> SELECT _FUNC_('Sp', 'SSparkSQLS');
arkSQLS
""")
case class StringTrimLeft(
srcStr: Expression,
trimStr: Option[Expression] = None)
extends String2TrimExpression {
def this(trimStr: Expression, srcStr: Expression) = this(srcStr, Option(trimStr))
def this(srcStr: Expression) = this(srcStr, None)
override def prettyName: String = "ltrim"
override def children: Seq[Expression] = if (trimStr.isDefined) {
srcStr :: trimStr.get :: Nil
} else {
srcStr :: Nil
}
override def eval(input: InternalRow): Any = {
val srcString = srcStr.eval(input).asInstanceOf[UTF8String]
if (srcString == null) {
null
} else {
if (trimStr.isDefined) {
srcString.trimLeft(trimStr.get.eval(input).asInstanceOf[UTF8String])
} else {
srcString.trimLeft()
}
}
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val evals = children.map(_.genCode(ctx))
val srcString = evals(0)
if (evals.length == 1) {
ev.copy(evals.map(_.code).mkString + s"""
boolean ${ev.isNull} = false;
UTF8String ${ev.value} = null;
if (${srcString.isNull}) {
${ev.isNull} = true;
} else {
${ev.value} = ${srcString.value}.trimLeft();
}""")
} else {
val trimString = evals(1)
val getTrimLeftFunction =
s"""
if (${trimString.isNull}) {
${ev.isNull} = true;
} else {
${ev.value} = ${srcString.value}.trimLeft(${trimString.value});
}"""
ev.copy(evals.map(_.code).mkString + s"""
boolean ${ev.isNull} = false;
UTF8String ${ev.value} = null;
if (${srcString.isNull}) {
${ev.isNull} = true;
} else {
$getTrimLeftFunction
}""")
}
}
}
object StringTrimRight {
def apply(str: Expression, trimStr: Expression): StringTrimRight =
StringTrimRight(str, Some(trimStr))
def apply(str: Expression) : StringTrimRight = StringTrimRight(str, None)
}
/**
 * A function that trims the characters from the right end of a given string.
 * If the TRAILING and trimStr keywords are not specified, it defaults to removing the space
 * character from the right end. The rtrim function then has one argument, which contains the
 * source string.
 * If the TRAILING and trimStr keywords are specified, it trims the characters from the right
 * end. The rtrim function has two arguments: the first argument contains trimStr, the second
 * argument contains the source string.
 * trimStr: the function removes from the right end of the source string any character that
 * matches a character in trimStr; it stops at the first non-matching character.
 * TRAILING: removes any character from the right end of the source string that matches
 * characters in the trim string.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = """
_FUNC_(str) - Removes the trailing space characters from `str`.
_FUNC_(trimStr, str) - Removes the trailing string which contains the characters from the trim string from the `str`
""",
arguments = """
Arguments:
* str - a string expression
* trimStr - the trim string characters to trim, the default value is a single space
""",
examples = """
Examples:
> SELECT _FUNC_(' SparkSQL ');
SparkSQL
> SELECT _FUNC_('LQSa', 'SSparkSQLS');
SSpark
""")
// scalastyle:on line.size.limit
case class StringTrimRight(
srcStr: Expression,
trimStr: Option[Expression] = None)
extends String2TrimExpression {
def this(trimStr: Expression, srcStr: Expression) = this(srcStr, Option(trimStr))
def this(srcStr: Expression) = this(srcStr, None)
override def prettyName: String = "rtrim"
override def children: Seq[Expression] = if (trimStr.isDefined) {
srcStr :: trimStr.get :: Nil
} else {
srcStr :: Nil
}
override def eval(input: InternalRow): Any = {
val srcString = srcStr.eval(input).asInstanceOf[UTF8String]
if (srcString == null) {
null
} else {
if (trimStr.isDefined) {
srcString.trimRight(trimStr.get.eval(input).asInstanceOf[UTF8String])
} else {
srcString.trimRight()
}
}
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val evals = children.map(_.genCode(ctx))
val srcString = evals(0)
if (evals.length == 1) {
ev.copy(evals.map(_.code).mkString + s"""
boolean ${ev.isNull} = false;
UTF8String ${ev.value} = null;
if (${srcString.isNull}) {
${ev.isNull} = true;
} else {
${ev.value} = ${srcString.value}.trimRight();
}""")
} else {
val trimString = evals(1)
val getTrimRightFunction =
s"""
if (${trimString.isNull}) {
${ev.isNull} = true;
} else {
${ev.value} = ${srcString.value}.trimRight(${trimString.value});
}"""
ev.copy(evals.map(_.code).mkString + s"""
boolean ${ev.isNull} = false;
UTF8String ${ev.value} = null;
if (${srcString.isNull}) {
${ev.isNull} = true;
} else {
$getTrimRightFunction
}""")
}
}
}
/**
 * A function that returns the position of the first occurrence of substr in the given string.
 * Returns null if either of the arguments is null and
 * returns 0 if substr could not be found in str.
 *
 * NOTE: this is not a zero-based, but a 1-based index. The first character in str has index 1.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(str, substr) - Returns the (1-based) index of the first occurrence of `substr` in `str`.",
examples = """
Examples:
> SELECT _FUNC_('SparkSQL', 'SQL');
6
""")
// scalastyle:on line.size.limit
case class StringInstr(str: Expression, substr: Expression)
extends BinaryExpression with ImplicitCastInputTypes {
override def left: Expression = str
override def right: Expression = substr
override def dataType: DataType = IntegerType
override def inputTypes: Seq[DataType] = Seq(StringType, StringType)
override def nullSafeEval(string: Any, sub: Any): Any = {
string.asInstanceOf[UTF8String].indexOf(sub.asInstanceOf[UTF8String], 0) + 1
}
override def prettyName: String = "instr"
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, (l, r) =>
s"($l).indexOf($r, 0) + 1")
}
}
/**
 * Returns the substring from string str before count occurrences of the delimiter delim.
 * If count is positive, everything to the left of the final delimiter (counting from the
 * left) is returned. If count is negative, everything to the right of the final delimiter
 * (counting from the right) is returned. substring_index performs a case-sensitive match
 * when searching for delim.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = """
_FUNC_(str, delim, count) - Returns the substring from `str` before `count` occurrences of the delimiter `delim`.
If `count` is positive, everything to the left of the final delimiter (counting from the
left) is returned. If `count` is negative, everything to the right of the final delimiter
(counting from the right) is returned. The function substring_index performs a case-sensitive match
when searching for `delim`.
""",
examples = """
Examples:
> SELECT _FUNC_('www.apache.org', '.', 2);
www.apache
""")
// scalastyle:on line.size.limit
case class SubstringIndex(strExpr: Expression, delimExpr: Expression, countExpr: Expression)
extends TernaryExpression with ImplicitCastInputTypes {
override def dataType: DataType = StringType
override def inputTypes: Seq[DataType] = Seq(StringType, StringType, IntegerType)
override def children: Seq[Expression] = Seq(strExpr, delimExpr, countExpr)
override def prettyName: String = "substring_index"
override def nullSafeEval(str: Any, delim: Any, count: Any): Any = {
str.asInstanceOf[UTF8String].subStringIndex(
delim.asInstanceOf[UTF8String],
count.asInstanceOf[Int])
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, (str, delim, count) => s"$str.subStringIndex($delim, $count)")
}
}
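// Illustrative sketch (not part of Spark's source): a positive count keeps everything to the
// left of the count-th delimiter; a negative count keeps everything to the right, counting
// from the right, as documented above. The inputs are hypothetical.
object SubstringIndexDemo {
  def main(args: Array[String]): Unit = {
    val url = UTF8String.fromString("www.apache.org")
    val dot = UTF8String.fromString(".")
    println(url.subStringIndex(dot, 2))  // www.apache
    println(url.subStringIndex(dot, -1)) // org
  }
}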
/**
 * A function that returns the position of the first occurrence of substr
 * in a given string after position pos.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = """
_FUNC_(substr, str[, pos]) - Returns the position of the first occurrence of `substr` in `str` after position `pos`.
The given `pos` and return value are 1-based.
""",
examples = """
Examples:
> SELECT _FUNC_('bar', 'foobarbar');
4
> SELECT _FUNC_('bar', 'foobarbar', 5);
7
> SELECT POSITION('bar' IN 'foobarbar');
4
""")
// scalastyle:on line.size.limit
case class StringLocate(substr: Expression, str: Expression, start: Expression)
extends TernaryExpression with ImplicitCastInputTypes {
def this(substr: Expression, str: Expression) = {
this(substr, str, Literal(1))
}
override def children: Seq[Expression] = substr :: str :: start :: Nil
override def nullable: Boolean = substr.nullable || str.nullable
override def dataType: DataType = IntegerType
override def inputTypes: Seq[DataType] = Seq(StringType, StringType, IntegerType)
override def eval(input: InternalRow): Any = {
val s = start.eval(input)
if (s == null) {
      // if the start position is null, we need to return 0 to conform to Hive
0
} else {
val r = substr.eval(input)
if (r == null) {
null
} else {
val l = str.eval(input)
if (l == null) {
null
} else {
val sVal = s.asInstanceOf[Int]
if (sVal < 1) {
0
} else {
l.asInstanceOf[UTF8String].indexOf(
r.asInstanceOf[UTF8String],
s.asInstanceOf[Int] - 1) + 1
}
}
}
}
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val substrGen = substr.genCode(ctx)
val strGen = str.genCode(ctx)
val startGen = start.genCode(ctx)
ev.copy(code = s"""
int ${ev.value} = 0;
boolean ${ev.isNull} = false;
${startGen.code}
if (!${startGen.isNull}) {
${substrGen.code}
if (!${substrGen.isNull}) {
${strGen.code}
if (!${strGen.isNull}) {
if (${startGen.value} > 0) {
${ev.value} = ${strGen.value}.indexOf(${substrGen.value},
${startGen.value} - 1) + 1;
}
} else {
${ev.isNull} = true;
}
} else {
${ev.isNull} = true;
}
}
""")
}
override def prettyName: String = "locate"
}
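// Illustrative sketch (not part of Spark's source): locate is 1-based, so eval() above
// searches from start - 1 and adds 1 to the zero-based result of indexOf. The inputs are
// hypothetical.
object LocateSemanticsDemo {
  def main(args: Array[String]): Unit = {
    val str = UTF8String.fromString("foobarbar")
    val sub = UTF8String.fromString("bar")
    println(str.indexOf(sub, 0) + 1) // 4 -- first occurrence, 1-based
    println(str.indexOf(sub, 4) + 1) // 7 -- locate('bar', 'foobarbar', 5)
  }
}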
/**
* Returns str, left-padded with pad to a length of len.
*/
@ExpressionDescription(
usage = """
_FUNC_(str, len, pad) - Returns `str`, left-padded with `pad` to a length of `len`.
If `str` is longer than `len`, the return value is shortened to `len` characters.
""",
examples = """
Examples:
> SELECT _FUNC_('hi', 5, '??');
???hi
> SELECT _FUNC_('hi', 1, '??');
h
""")
case class StringLPad(str: Expression, len: Expression, pad: Expression)
extends TernaryExpression with ImplicitCastInputTypes {
override def children: Seq[Expression] = str :: len :: pad :: Nil
override def dataType: DataType = StringType
override def inputTypes: Seq[DataType] = Seq(StringType, IntegerType, StringType)
override def nullSafeEval(str: Any, len: Any, pad: Any): Any = {
str.asInstanceOf[UTF8String].lpad(len.asInstanceOf[Int], pad.asInstanceOf[UTF8String])
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, (str, len, pad) => s"$str.lpad($len, $pad)")
}
override def prettyName: String = "lpad"
}
/**
* Returns str, right-padded with pad to a length of len.
*/
@ExpressionDescription(
usage = """
_FUNC_(str, len, pad) - Returns `str`, right-padded with `pad` to a length of `len`.
If `str` is longer than `len`, the return value is shortened to `len` characters.
""",
examples = """
Examples:
> SELECT _FUNC_('hi', 5, '??');
hi???
> SELECT _FUNC_('hi', 1, '??');
h
""")
case class StringRPad(str: Expression, len: Expression, pad: Expression)
extends TernaryExpression with ImplicitCastInputTypes {
override def children: Seq[Expression] = str :: len :: pad :: Nil
override def dataType: DataType = StringType
override def inputTypes: Seq[DataType] = Seq(StringType, IntegerType, StringType)
override def nullSafeEval(str: Any, len: Any, pad: Any): Any = {
str.asInstanceOf[UTF8String].rpad(len.asInstanceOf[Int], pad.asInstanceOf[UTF8String])
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, (str, len, pad) => s"$str.rpad($len, $pad)")
}
override def prettyName: String = "rpad"
}
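// Illustrative sketch (not part of Spark's source): lpad and rpad pad up to len, and truncate
// to len when the input is already longer, per the usage docs above. Inputs are hypothetical.
object PadSemanticsDemo {
  def main(args: Array[String]): Unit = {
    val hi = UTF8String.fromString("hi")
    val pad = UTF8String.fromString("??")
    println(hi.lpad(5, pad)) // ???hi
    println(hi.rpad(5, pad)) // hi???
    println(hi.lpad(1, pad)) // h -- truncated to len
  }
}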
object ParseUrl {
private val HOST = UTF8String.fromString("HOST")
private val PATH = UTF8String.fromString("PATH")
private val QUERY = UTF8String.fromString("QUERY")
private val REF = UTF8String.fromString("REF")
private val PROTOCOL = UTF8String.fromString("PROTOCOL")
private val FILE = UTF8String.fromString("FILE")
private val AUTHORITY = UTF8String.fromString("AUTHORITY")
private val USERINFO = UTF8String.fromString("USERINFO")
private val REGEXPREFIX = "(&|^)"
  private val REGEXSUFFIX = "=([^&]*)"
}
/**
* Extracts a part from a URL
*/
@ExpressionDescription(
usage = "_FUNC_(url, partToExtract[, key]) - Extracts a part from a URL.",
examples = """
Examples:
> SELECT _FUNC_('http://spark.apache.org/path?query=1', 'HOST')
spark.apache.org
> SELECT _FUNC_('http://spark.apache.org/path?query=1', 'QUERY')
query=1
> SELECT _FUNC_('http://spark.apache.org/path?query=1', 'QUERY', 'query')
1
""")
case class ParseUrl(children: Seq[Expression])
extends Expression with ExpectsInputTypes with CodegenFallback {
override def nullable: Boolean = true
override def inputTypes: Seq[DataType] = Seq.fill(children.size)(StringType)
override def dataType: DataType = StringType
override def prettyName: String = "parse_url"
// If the url is a constant, cache the URL object so that we don't need to convert url
// from UTF8String to String to URL for every row.
@transient private lazy val cachedUrl = children(0) match {
case Literal(url: UTF8String, _) if url ne null => getUrl(url)
case _ => null
}
// If the key is a constant, cache the Pattern object so that we don't need to convert key
// from UTF8String to String to StringBuilder to String to Pattern for every row.
@transient private lazy val cachedPattern = children(2) match {
case Literal(key: UTF8String, _) if key ne null => getPattern(key)
case _ => null
}
// If the partToExtract is a constant, cache the Extract part function so that we don't need
// to check the partToExtract for every row.
@transient private lazy val cachedExtractPartFunc = children(1) match {
case Literal(part: UTF8String, _) => getExtractPartFunc(part)
case _ => null
}
import ParseUrl._
override def checkInputDataTypes(): TypeCheckResult = {
if (children.size > 3 || children.size < 2) {
TypeCheckResult.TypeCheckFailure(s"$prettyName function requires two or three arguments")
} else {
super[ExpectsInputTypes].checkInputDataTypes()
}
}
private def getPattern(key: UTF8String): Pattern = {
    Pattern.compile(REGEXPREFIX + key.toString + REGEXSUFFIX)
}
private def getUrl(url: UTF8String): URI = {
try {
new URI(url.toString)
} catch {
case e: URISyntaxException => null
}
}
private def getExtractPartFunc(partToExtract: UTF8String): URI => String = {
// partToExtract match {
// case HOST => _.toURL().getHost
// case PATH => _.toURL().getPath
// case QUERY => _.toURL().getQuery
// case REF => _.toURL().getRef
// case PROTOCOL => _.toURL().getProtocol
// case FILE => _.toURL().getFile
// case AUTHORITY => _.toURL().getAuthority
// case USERINFO => _.toURL().getUserInfo
// case _ => (url: URI) => null
// }
partToExtract match {
case HOST => _.getHost
case PATH => _.getRawPath
case QUERY => _.getRawQuery
case REF => _.getRawFragment
case PROTOCOL => _.getScheme
case FILE =>
(url: URI) =>
if (url.getRawQuery ne null) {
url.getRawPath + "?" + url.getRawQuery
} else {
url.getRawPath
}
case AUTHORITY => _.getRawAuthority
case USERINFO => _.getRawUserInfo
case _ => (url: URI) => null
}
}
private def extractValueFromQuery(query: UTF8String, pattern: Pattern): UTF8String = {
val m = pattern.matcher(query.toString)
if (m.find()) {
UTF8String.fromString(m.group(2))
} else {
null
}
}
private def extractFromUrl(url: URI, partToExtract: UTF8String): UTF8String = {
if (cachedExtractPartFunc ne null) {
UTF8String.fromString(cachedExtractPartFunc.apply(url))
} else {
UTF8String.fromString(getExtractPartFunc(partToExtract).apply(url))
}
}
private def parseUrlWithoutKey(url: UTF8String, partToExtract: UTF8String): UTF8String = {
if (cachedUrl ne null) {
extractFromUrl(cachedUrl, partToExtract)
} else {
val currentUrl = getUrl(url)
if (currentUrl ne null) {
extractFromUrl(currentUrl, partToExtract)
} else {
null
}
}
}
override def eval(input: InternalRow): Any = {
val evaluated = children.map{e => e.eval(input).asInstanceOf[UTF8String]}
if (evaluated.contains(null)) return null
if (evaluated.size == 2) {
parseUrlWithoutKey(evaluated(0), evaluated(1))
} else {
// 3-arg, i.e. QUERY with key
assert(evaluated.size == 3)
if (evaluated(1) != QUERY) {
return null
}
val query = parseUrlWithoutKey(evaluated(0), evaluated(1))
if (query eq null) {
return null
}
if (cachedPattern ne null) {
extractValueFromQuery(query, cachedPattern)
} else {
extractValueFromQuery(query, getPattern(evaluated(2)))
}
}
}
}
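// Illustrative sketch (not part of Spark's source): for the three-argument QUERY form,
// ParseUrl builds the regex "(&|^)<key>=([^&]*)" and returns capture group 2, as
// extractValueFromQuery does above. The URL below is hypothetical.
object ParseUrlQueryDemo {
  def main(args: Array[String]): Unit = {
    val query = new URI("http://spark.apache.org/path?query=1&lang=en").getRawQuery
    val m = Pattern.compile("(&|^)query=([^&]*)").matcher(query)
    if (m.find()) println(m.group(2)) // 1
  }
}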
/**
 * Returns the input formatted according to printf-style format strings
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(strfmt, obj, ...) - Returns a formatted string from printf-style format strings.",
examples = """
Examples:
> SELECT _FUNC_("Hello World %d %s", 100, "days");
Hello World 100 days
""")
// scalastyle:on line.size.limit
case class FormatString(children: Expression*) extends Expression with ImplicitCastInputTypes {
require(children.nonEmpty, "format_string() should take at least 1 argument")
override def foldable: Boolean = children.forall(_.foldable)
override def nullable: Boolean = children(0).nullable
override def dataType: DataType = StringType
override def inputTypes: Seq[AbstractDataType] =
StringType :: List.fill(children.size - 1)(AnyDataType)
override def eval(input: InternalRow): Any = {
val pattern = children(0).eval(input)
if (pattern == null) {
null
} else {
val sb = new StringBuffer()
val formatter = new java.util.Formatter(sb, Locale.US)
val arglist = children.tail.map(_.eval(input).asInstanceOf[AnyRef])
formatter.format(pattern.asInstanceOf[UTF8String].toString, arglist: _*)
UTF8String.fromString(sb.toString)
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val pattern = children.head.genCode(ctx)
val argListGen = children.tail.map(x => (x.dataType, x.genCode(ctx)))
val argList = ctx.freshName("argLists")
val numArgLists = argListGen.length
val argListCode = argListGen.zipWithIndex.map { case(v, index) =>
val value =
if (CodeGenerator.boxedType(v._1) != CodeGenerator.javaType(v._1)) {
// Java primitives get boxed in order to allow null values.
s"(${v._2.isNull}) ? (${CodeGenerator.boxedType(v._1)}) null : " +
s"new ${CodeGenerator.boxedType(v._1)}(${v._2.value})"
} else {
s"(${v._2.isNull}) ? null : ${v._2.value}"
}
s"""
${v._2.code}
$argList[$index] = $value;
"""
}
val argListCodes = ctx.splitExpressionsWithCurrentInputs(
expressions = argListCode,
funcName = "valueFormatString",
extraArguments = ("Object[]", argList) :: Nil)
val form = ctx.freshName("formatter")
val formatter = classOf[java.util.Formatter].getName
val sb = ctx.freshName("sb")
val stringBuffer = classOf[StringBuffer].getName
ev.copy(code = s"""
${pattern.code}
boolean ${ev.isNull} = ${pattern.isNull};
${CodeGenerator.javaType(dataType)} ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
if (!${ev.isNull}) {
$stringBuffer $sb = new $stringBuffer();
$formatter $form = new $formatter($sb, ${classOf[Locale].getName}.US);
Object[] $argList = new Object[$numArgLists];
$argListCodes
$form.format(${pattern.value}.toString(), $argList);
${ev.value} = UTF8String.fromString($sb.toString());
}""")
}
override def prettyName: String = "format_string"
}
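// Illustrative sketch (not part of Spark's source): format_string delegates to
// java.util.Formatter with a US locale, exactly as eval() above does. The format string and
// arguments are hypothetical.
object FormatStringDemo {
  def main(args: Array[String]): Unit = {
    val sb = new StringBuffer()
    new java.util.Formatter(sb, Locale.US).format("Hello World %d %s", Int.box(100), "days")
    println(sb) // Hello World 100 days
  }
}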
/**
* Returns string, with the first letter of each word in uppercase, all other letters in lowercase.
* Words are delimited by whitespace.
*/
@ExpressionDescription(
usage = """
_FUNC_(str) - Returns `str` with the first letter of each word in uppercase.
All other letters are in lowercase. Words are delimited by white space.
""",
examples = """
Examples:
> SELECT _FUNC_('sPark sql');
Spark Sql
""")
case class InitCap(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
override def inputTypes: Seq[DataType] = Seq(StringType)
override def dataType: DataType = StringType
override def nullSafeEval(string: Any): Any = {
string.asInstanceOf[UTF8String].toLowerCase.toTitleCase
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, str => s"$str.toLowerCase().toTitleCase()")
}
}
/**
 * Returns the string which repeats the given string value n times.
*/
@ExpressionDescription(
usage = "_FUNC_(str, n) - Returns the string which repeats the given string value n times.",
examples = """
Examples:
> SELECT _FUNC_('123', 2);
123123
""")
case class StringRepeat(str: Expression, times: Expression)
extends BinaryExpression with ImplicitCastInputTypes {
override def left: Expression = str
override def right: Expression = times
override def dataType: DataType = StringType
override def inputTypes: Seq[DataType] = Seq(StringType, IntegerType)
override def nullSafeEval(string: Any, n: Any): Any = {
string.asInstanceOf[UTF8String].repeat(n.asInstanceOf[Integer])
}
override def prettyName: String = "repeat"
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, (l, r) => s"($l).repeat($r)")
}
}
/**
* Returns the reversed given string.
*/
@ExpressionDescription(
usage = "_FUNC_(str) - Returns the reversed given string.",
examples = """
Examples:
> SELECT _FUNC_('Spark SQL');
LQS krapS
""")
case class StringReverse(child: Expression) extends UnaryExpression with String2StringExpression {
override def convert(v: UTF8String): UTF8String = v.reverse()
override def prettyName: String = "reverse"
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, c => s"($c).reverse()")
}
}
/**
* Returns a string consisting of n spaces.
*/
@ExpressionDescription(
usage = "_FUNC_(n) - Returns a string consisting of `n` spaces.",
examples = """
Examples:
> SELECT concat(_FUNC_(2), '1');
1
""")
case class StringSpace(child: Expression)
extends UnaryExpression with ImplicitCastInputTypes {
override def dataType: DataType = StringType
override def inputTypes: Seq[DataType] = Seq(IntegerType)
override def nullSafeEval(s: Any): Any = {
val length = s.asInstanceOf[Int]
UTF8String.blankString(if (length < 0) 0 else length)
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, (length) =>
s"""${ev.value} = UTF8String.blankString(($length < 0) ? 0 : $length);""")
}
override def prettyName: String = "space"
}
/**
* A function that takes a substring of its first argument starting at a given position.
* Defined for String and Binary types.
*
 * NOTE: this is not a zero-based, but a 1-based index. The first character in str has index 1.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(str, pos[, len]) - Returns the substring of `str` that starts at `pos` and is of length `len`, or the slice of byte array that starts at `pos` and is of length `len`.",
examples = """
Examples:
> SELECT _FUNC_('Spark SQL', 5);
k SQL
> SELECT _FUNC_('Spark SQL', -3);
SQL
> SELECT _FUNC_('Spark SQL', 5, 1);
k
""")
// scalastyle:on line.size.limit
case class Substring(str: Expression, pos: Expression, len: Expression)
extends TernaryExpression with ImplicitCastInputTypes with NullIntolerant {
def this(str: Expression, pos: Expression) = {
this(str, pos, Literal(Integer.MAX_VALUE))
}
override def dataType: DataType = str.dataType
override def inputTypes: Seq[AbstractDataType] =
Seq(TypeCollection(StringType, BinaryType), IntegerType, IntegerType)
override def children: Seq[Expression] = str :: pos :: len :: Nil
override def nullSafeEval(string: Any, pos: Any, len: Any): Any = {
str.dataType match {
case StringType => string.asInstanceOf[UTF8String]
.substringSQL(pos.asInstanceOf[Int], len.asInstanceOf[Int])
case BinaryType => ByteArray.subStringSQL(string.asInstanceOf[Array[Byte]],
pos.asInstanceOf[Int], len.asInstanceOf[Int])
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, (string, pos, len) => {
str.dataType match {
case StringType => s"$string.substringSQL($pos, $len)"
case BinaryType => s"${classOf[ByteArray].getName}.subStringSQL($string, $pos, $len)"
}
})
}
}
/**
* Returns the rightmost n characters from the string.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(str, len) - Returns the rightmost `len`(`len` can be string type) characters from the string `str`,if `len` is less or equal than 0 the result is an empty string.",
examples = """
Examples:
> SELECT _FUNC_('Spark SQL', 3);
SQL
""")
// scalastyle:on line.size.limit
case class Right(str: Expression, len: Expression, child: Expression) extends RuntimeReplaceable {
def this(str: Expression, len: Expression) = {
this(str, len, If(IsNull(str), Literal(null, StringType), If(LessThanOrEqual(len, Literal(0)),
Literal(UTF8String.EMPTY_UTF8, StringType), new Substring(str, UnaryMinus(len)))))
}
override def flatArguments: Iterator[Any] = Iterator(str, len)
override def sql: String = s"$prettyName(${str.sql}, ${len.sql})"
}
/**
* Returns the leftmost n characters from the string.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(str, len) - Returns the leftmost `len`(`len` can be string type) characters from the string `str`,if `len` is less or equal than 0 the result is an empty string.",
examples = """
Examples:
> SELECT _FUNC_('Spark SQL', 3);
Spa
""")
// scalastyle:on line.size.limit
case class Left(str: Expression, len: Expression, child: Expression) extends RuntimeReplaceable {
def this(str: Expression, len: Expression) = {
this(str, len, Substring(str, Literal(1), len))
}
override def flatArguments: Iterator[Any] = Iterator(str, len)
override def sql: String = s"$prettyName(${str.sql}, ${len.sql})"
}
/**
* A function that returns the char length of the given string expression or
* number of bytes of the given binary expression.
*/
@ExpressionDescription(
usage = "_FUNC_(expr) - Returns the character length of string data or number of bytes of " +
"binary data. The length of string data includes the trailing spaces. The length of binary " +
"data includes binary zeros.",
examples = """
Examples:
> SELECT _FUNC_('Spark SQL ');
10
> SELECT CHAR_LENGTH('Spark SQL ');
10
> SELECT CHARACTER_LENGTH('Spark SQL ');
10
""")
case class Length(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
override def dataType: DataType = IntegerType
override def inputTypes: Seq[AbstractDataType] = Seq(TypeCollection(StringType, BinaryType))
protected override def nullSafeEval(value: Any): Any = child.dataType match {
case StringType => value.asInstanceOf[UTF8String].numChars
case BinaryType => value.asInstanceOf[Array[Byte]].length
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
child.dataType match {
case StringType => defineCodeGen(ctx, ev, c => s"($c).numChars()")
case BinaryType => defineCodeGen(ctx, ev, c => s"($c).length")
}
}
}
/**
* A function that returns the bit length of the given string or binary expression.
*/
@ExpressionDescription(
usage = "_FUNC_(expr) - Returns the bit length of string data or number of bits of binary data.",
examples = """
Examples:
> SELECT _FUNC_('Spark SQL');
72
""")
case class BitLength(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
override def dataType: DataType = IntegerType
override def inputTypes: Seq[AbstractDataType] = Seq(TypeCollection(StringType, BinaryType))
protected override def nullSafeEval(value: Any): Any = child.dataType match {
case StringType => value.asInstanceOf[UTF8String].numBytes * 8
case BinaryType => value.asInstanceOf[Array[Byte]].length * 8
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
child.dataType match {
case StringType => defineCodeGen(ctx, ev, c => s"($c).numBytes() * 8")
case BinaryType => defineCodeGen(ctx, ev, c => s"($c).length * 8")
}
}
override def prettyName: String = "bit_length"
}
/**
* A function that returns the byte length of the given string or binary expression.
*/
@ExpressionDescription(
usage = "_FUNC_(expr) - Returns the byte length of string data or number of bytes of binary " +
"data.",
examples = """
Examples:
> SELECT _FUNC_('Spark SQL');
9
""")
case class OctetLength(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
override def dataType: DataType = IntegerType
override def inputTypes: Seq[AbstractDataType] = Seq(TypeCollection(StringType, BinaryType))
protected override def nullSafeEval(value: Any): Any = child.dataType match {
case StringType => value.asInstanceOf[UTF8String].numBytes
case BinaryType => value.asInstanceOf[Array[Byte]].length
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
child.dataType match {
case StringType => defineCodeGen(ctx, ev, c => s"($c).numBytes()")
case BinaryType => defineCodeGen(ctx, ev, c => s"($c).length")
}
}
override def prettyName: String = "octet_length"
}
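// Illustrative sketch (not part of Spark's source): for multi-byte UTF-8 data the three
// length functions differ; length counts characters, octet_length counts bytes, and
// bit_length counts bytes * 8. The string below is hypothetical.
object LengthFamilyDemo {
  def main(args: Array[String]): Unit = {
    val s = UTF8String.fromString("h\u00e9llo") // the accented character occupies two bytes
    println(s.numChars)     // 5
    println(s.numBytes)     // 6
    println(s.numBytes * 8) // 48
  }
}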
/**
 * A function that returns the Levenshtein distance between the two given strings.
*/
@ExpressionDescription(
usage = "_FUNC_(str1, str2) - Returns the Levenshtein distance between the two given strings.",
examples = """
Examples:
> SELECT _FUNC_('kitten', 'sitting');
3
""")
case class Levenshtein(left: Expression, right: Expression) extends BinaryExpression
with ImplicitCastInputTypes {
override def inputTypes: Seq[AbstractDataType] = Seq(StringType, StringType)
override def dataType: DataType = IntegerType
protected override def nullSafeEval(leftValue: Any, rightValue: Any): Any =
leftValue.asInstanceOf[UTF8String].levenshteinDistance(rightValue.asInstanceOf[UTF8String])
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, (left, right) =>
s"${ev.value} = $left.levenshteinDistance($right);")
}
}
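// Illustrative sketch (not part of Spark's source): a textbook dynamic-programming edit
// distance, shown only to make the semantics concrete; the expression above delegates to
// UTF8String.levenshteinDistance.
object LevenshteinDemo {
  def main(args: Array[String]): Unit = {
    def dist(a: String, b: String): Int = {
      val dp = Array.tabulate(a.length + 1, b.length + 1) { (i, j) =>
        if (i == 0) j else if (j == 0) i else 0
      }
      for (i <- 1 to a.length; j <- 1 to b.length) {
        val cost = if (a(i - 1) == b(j - 1)) 0 else 1
        dp(i)(j) = math.min(math.min(dp(i - 1)(j) + 1, dp(i)(j - 1) + 1),
          dp(i - 1)(j - 1) + cost)
      }
      dp(a.length)(b.length)
    }
    println(dist("kitten", "sitting")) // 3
  }
}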
/**
 * A function that returns the Soundex code of the given string expression.
*/
@ExpressionDescription(
usage = "_FUNC_(str) - Returns Soundex code of the string.",
examples = """
Examples:
> SELECT _FUNC_('Miller');
M460
""")
case class SoundEx(child: Expression) extends UnaryExpression with ExpectsInputTypes {
override def dataType: DataType = StringType
override def inputTypes: Seq[DataType] = Seq(StringType)
override def nullSafeEval(input: Any): Any = input.asInstanceOf[UTF8String].soundex()
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, c => s"$c.soundex()")
}
}
/**
* Returns the numeric value of the first character of str.
*/
@ExpressionDescription(
usage = "_FUNC_(str) - Returns the numeric value of the first character of `str`.",
examples = """
Examples:
> SELECT _FUNC_('222');
50
> SELECT _FUNC_(2);
50
""")
case class Ascii(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
override def dataType: DataType = IntegerType
override def inputTypes: Seq[DataType] = Seq(StringType)
protected override def nullSafeEval(string: Any): Any = {
val bytes = string.asInstanceOf[UTF8String].getBytes
if (bytes.length > 0) {
bytes(0).asInstanceOf[Int]
} else {
0
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, (child) => {
val bytes = ctx.freshName("bytes")
s"""
byte[] $bytes = $child.getBytes();
if ($bytes.length > 0) {
${ev.value} = (int) $bytes[0];
} else {
${ev.value} = 0;
}
"""})
}
}
/**
* Returns the ASCII character having the binary equivalent to n.
* If n is larger than 256 the result is equivalent to chr(n % 256)
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(expr) - Returns the ASCII character having the binary equivalent to `expr`. If n is larger than 256 the result is equivalent to chr(n % 256)",
examples = """
Examples:
> SELECT _FUNC_(65);
A
""")
// scalastyle:on line.size.limit
case class Chr(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
override def dataType: DataType = StringType
override def inputTypes: Seq[DataType] = Seq(LongType)
protected override def nullSafeEval(lon: Any): Any = {
val longVal = lon.asInstanceOf[Long]
if (longVal < 0) {
UTF8String.EMPTY_UTF8
} else if ((longVal & 0xFF) == 0) {
UTF8String.fromString(Character.MIN_VALUE.toString)
} else {
UTF8String.fromString((longVal & 0xFF).toChar.toString)
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, lon => {
s"""
if ($lon < 0) {
${ev.value} = UTF8String.EMPTY_UTF8;
} else if (($lon & 0xFF) == 0) {
${ev.value} = UTF8String.fromString(String.valueOf(Character.MIN_VALUE));
} else {
char c = (char)($lon & 0xFF);
${ev.value} = UTF8String.fromString(String.valueOf(c));
}
"""
})
}
}
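// Illustrative sketch (not part of Spark's source): chr keeps only the low byte of its
// argument, so values above 255 wrap around, and a negative input yields the empty string,
// as in nullSafeEval above.
object ChrSemanticsDemo {
  def main(args: Array[String]): Unit = {
    def chr(n: Long): String =
      if (n < 0) "" else if ((n & 0xFF) == 0) "\u0000" else (n & 0xFF).toChar.toString
    println(chr(65))  // A
    println(chr(321)) // A -- 321 % 256 == 65
  }
}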
/**
* Converts the argument from binary to a base 64 string.
*/
@ExpressionDescription(
usage = "_FUNC_(bin) - Converts the argument from a binary `bin` to a base 64 string.",
examples = """
Examples:
> SELECT _FUNC_('Spark SQL');
U3BhcmsgU1FM
""")
case class Base64(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
override def dataType: DataType = StringType
override def inputTypes: Seq[DataType] = Seq(BinaryType)
protected override def nullSafeEval(bytes: Any): Any = {
UTF8String.fromBytes(
org.apache.commons.codec.binary.Base64.encodeBase64(
bytes.asInstanceOf[Array[Byte]]))
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, (child) => {
s"""${ev.value} = UTF8String.fromBytes(
org.apache.commons.codec.binary.Base64.encodeBase64($child));
"""})
}
}
/**
* Converts the argument from a base 64 string to BINARY.
*/
@ExpressionDescription(
usage = "_FUNC_(str) - Converts the argument from a base 64 string `str` to a binary.",
examples = """
Examples:
> SELECT _FUNC_('U3BhcmsgU1FM');
Spark SQL
""")
case class UnBase64(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
override def dataType: DataType = BinaryType
override def inputTypes: Seq[DataType] = Seq(StringType)
protected override def nullSafeEval(string: Any): Any =
org.apache.commons.codec.binary.Base64.decodeBase64(string.asInstanceOf[UTF8String].toString)
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, (child) => {
s"""
${ev.value} = org.apache.commons.codec.binary.Base64.decodeBase64($child.toString());
"""})
}
}
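// Illustrative sketch (not part of Spark's source): base64 and unbase64 round-trip through
// commons-codec, just like the two nullSafeEval bodies above. The input is hypothetical.
object Base64RoundTripDemo {
  def main(args: Array[String]): Unit = {
    val encoded = org.apache.commons.codec.binary.Base64.encodeBase64(
      "Spark SQL".getBytes("utf-8"))
    println(new String(encoded, "utf-8")) // U3BhcmsgU1FM
    val decoded = org.apache.commons.codec.binary.Base64.decodeBase64(
      new String(encoded, "utf-8"))
    println(new String(decoded, "utf-8")) // Spark SQL
  }
}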
/**
* Decodes the first argument into a String using the provided character set
* (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
* If either argument is null, the result will also be null.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(bin, charset) - Decodes the first argument using the second argument character set.",
examples = """
Examples:
> SELECT _FUNC_(encode('abc', 'utf-8'), 'utf-8');
abc
""")
// scalastyle:on line.size.limit
case class Decode(bin: Expression, charset: Expression)
extends BinaryExpression with ImplicitCastInputTypes {
override def left: Expression = bin
override def right: Expression = charset
override def dataType: DataType = StringType
override def inputTypes: Seq[DataType] = Seq(BinaryType, StringType)
protected override def nullSafeEval(input1: Any, input2: Any): Any = {
val fromCharset = input2.asInstanceOf[UTF8String].toString
UTF8String.fromString(new String(input1.asInstanceOf[Array[Byte]], fromCharset))
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, (bytes, charset) =>
s"""
try {
${ev.value} = UTF8String.fromString(new String($bytes, $charset.toString()));
} catch (java.io.UnsupportedEncodingException e) {
org.apache.spark.unsafe.Platform.throwException(e);
}
""")
}
}
/**
* Encodes the first argument into a BINARY using the provided character set
* (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
* If either argument is null, the result will also be null.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(str, charset) - Encodes the first argument using the second argument character set.",
examples = """
Examples:
> SELECT _FUNC_('abc', 'utf-8');
abc
""")
// scalastyle:on line.size.limit
case class Encode(value: Expression, charset: Expression)
extends BinaryExpression with ImplicitCastInputTypes {
override def left: Expression = value
override def right: Expression = charset
override def dataType: DataType = BinaryType
override def inputTypes: Seq[DataType] = Seq(StringType, StringType)
protected override def nullSafeEval(input1: Any, input2: Any): Any = {
val toCharset = input2.asInstanceOf[UTF8String].toString
input1.asInstanceOf[UTF8String].toString.getBytes(toCharset)
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, (string, charset) =>
s"""
try {
${ev.value} = $string.toString().getBytes($charset.toString());
} catch (java.io.UnsupportedEncodingException e) {
org.apache.spark.unsafe.Platform.throwException(e);
}""")
}
}
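// Illustrative sketch (not part of Spark's source): encode and decode are inverses for a
// given charset, mirroring the two nullSafeEval implementations above. The charset choice is
// hypothetical.
object EncodeDecodeDemo {
  def main(args: Array[String]): Unit = {
    val bytes = "abc".getBytes("utf-8") // encode('abc', 'utf-8')
    println(new String(bytes, "utf-8")) // decode(..., 'utf-8') => abc
  }
}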
/**
* Formats the number X to a format like '#,###,###.##', rounded to D decimal places,
* and returns the result as a string. If D is 0, the result has no decimal point or
* fractional part.
*/
@ExpressionDescription(
usage = """
_FUNC_(expr1, expr2) - Formats the number `expr1` like '#,###,###.##', rounded to `expr2`
decimal places. If `expr2` is 0, the result has no decimal point or fractional part.
This is supposed to function like MySQL's FORMAT.
""",
examples = """
Examples:
> SELECT _FUNC_(12332.123456, 4);
12,332.1235
""")
case class FormatNumber(x: Expression, d: Expression)
extends BinaryExpression with ExpectsInputTypes {
override def left: Expression = x
override def right: Expression = d
override def dataType: DataType = StringType
override def nullable: Boolean = true
override def inputTypes: Seq[AbstractDataType] = Seq(NumericType, IntegerType)
  // Stores the last d value. We update the pattern (DecimalFormat) only when the incoming
  // d value differs from the last one.
// This is an Option to distinguish between 0 (numberFormat is valid) and uninitialized after
// serialization (numberFormat has not been updated for dValue = 0).
@transient
private var lastDValue: Option[Int] = None
  // A cached DecimalFormat, kept for performance; we change it only when the d value changes.
@transient
private lazy val pattern: StringBuffer = new StringBuffer()
// SPARK-13515: US Locale configures the DecimalFormat object to use a dot ('.')
// as a decimal separator.
@transient
private lazy val numberFormat = new DecimalFormat("", new DecimalFormatSymbols(Locale.US))
override protected def nullSafeEval(xObject: Any, dObject: Any): Any = {
val dValue = dObject.asInstanceOf[Int]
if (dValue < 0) {
return null
}
lastDValue match {
case Some(last) if last == dValue =>
// use the current pattern
case _ =>
// construct a new DecimalFormat only if a new dValue
pattern.delete(0, pattern.length)
pattern.append("#,###,###,###,###,###,##0")
// decimal place
if (dValue > 0) {
pattern.append(".")
var i = 0
while (i < dValue) {
i += 1
pattern.append("0")
}
}
lastDValue = Some(dValue)
numberFormat.applyLocalizedPattern(pattern.toString)
}
x.dataType match {
case ByteType => UTF8String.fromString(numberFormat.format(xObject.asInstanceOf[Byte]))
case ShortType => UTF8String.fromString(numberFormat.format(xObject.asInstanceOf[Short]))
case FloatType => UTF8String.fromString(numberFormat.format(xObject.asInstanceOf[Float]))
case IntegerType => UTF8String.fromString(numberFormat.format(xObject.asInstanceOf[Int]))
case LongType => UTF8String.fromString(numberFormat.format(xObject.asInstanceOf[Long]))
case DoubleType => UTF8String.fromString(numberFormat.format(xObject.asInstanceOf[Double]))
case _: DecimalType =>
UTF8String.fromString(numberFormat.format(xObject.asInstanceOf[Decimal].toJavaBigDecimal))
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, (num, d) => {
def typeHelper(p: String): String = {
x.dataType match {
case _ : DecimalType => s"""$p.toJavaBigDecimal()"""
case _ => s"$p"
}
}
val sb = classOf[StringBuffer].getName
val df = classOf[DecimalFormat].getName
val dfs = classOf[DecimalFormatSymbols].getName
val l = classOf[Locale].getName
// SPARK-13515: US Locale configures the DecimalFormat object to use a dot ('.')
// as a decimal separator.
val usLocale = "US"
val i = ctx.freshName("i")
val dFormat = ctx.freshName("dFormat")
val lastDValue =
ctx.addMutableState(CodeGenerator.JAVA_INT, "lastDValue", v => s"$v = -100;")
val pattern = ctx.addMutableState(sb, "pattern", v => s"$v = new $sb();")
val numberFormat = ctx.addMutableState(df, "numberFormat",
v => s"""$v = new $df("", new $dfs($l.$usLocale));""")
s"""
if ($d >= 0) {
$pattern.delete(0, $pattern.length());
if ($d != $lastDValue) {
$pattern.append("#,###,###,###,###,###,##0");
if ($d > 0) {
$pattern.append(".");
for (int $i = 0; $i < $d; $i++) {
$pattern.append("0");
}
}
$lastDValue = $d;
$numberFormat.applyLocalizedPattern($pattern.toString());
}
${ev.value} = UTF8String.fromString($numberFormat.format(${typeHelper(num)}));
} else {
${ev.value} = null;
${ev.isNull} = true;
}
"""
})
}
override def prettyName: String = "format_number"
}
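// Illustrative sketch (not part of the original file): evaluating the
// expression directly with literals. `Literal` and `EmptyRow` come from this
// same package; the expected outputs follow the eval logic above.
object FormatNumberExample {
  val formatted = FormatNumber(Literal(12332.123456), Literal(4)).eval(EmptyRow)
  // formatted == UTF8String "12,332.1235"
  val noFraction = FormatNumber(Literal(12332.123456), Literal(0)).eval(EmptyRow)
  // noFraction == UTF8String "12,332" (d = 0: no decimal point or fractional part)
  val negativeD = FormatNumber(Literal(12332.123456), Literal(-1)).eval(EmptyRow)
  // negativeD == null (a negative d short-circuits to null)
}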
/**
* Splits a string into arrays of sentences, where each sentence is an array of words.
* The 'lang' and 'country' arguments are optional, and if omitted, the default locale is used.
*/
@ExpressionDescription(
usage = "_FUNC_(str[, lang, country]) - Splits `str` into an array of array of words.",
examples = """
Examples:
> SELECT _FUNC_('Hi there! Good morning.');
[["Hi","there"],["Good","morning"]]
""")
case class Sentences(
str: Expression,
language: Expression = Literal(""),
country: Expression = Literal(""))
extends Expression with ImplicitCastInputTypes with CodegenFallback {
def this(str: Expression) = this(str, Literal(""), Literal(""))
def this(str: Expression, language: Expression) = this(str, language, Literal(""))
override def nullable: Boolean = true
override def dataType: DataType =
ArrayType(ArrayType(StringType, containsNull = false), containsNull = false)
override def inputTypes: Seq[AbstractDataType] = Seq(StringType, StringType, StringType)
override def children: Seq[Expression] = str :: language :: country :: Nil
override def eval(input: InternalRow): Any = {
val string = str.eval(input)
if (string == null) {
null
} else {
val languageStr = language.eval(input).asInstanceOf[UTF8String]
val countryStr = country.eval(input).asInstanceOf[UTF8String]
val locale = if (languageStr != null && countryStr != null) {
new Locale(languageStr.toString, countryStr.toString)
} else {
Locale.US
}
getSentences(string.asInstanceOf[UTF8String].toString, locale)
}
}
private def getSentences(sentences: String, locale: Locale) = {
val bi = BreakIterator.getSentenceInstance(locale)
bi.setText(sentences)
var idx = 0
val result = new ArrayBuffer[GenericArrayData]
while (bi.next != BreakIterator.DONE) {
val sentence = sentences.substring(idx, bi.current)
idx = bi.current
val wi = BreakIterator.getWordInstance(locale)
var widx = 0
wi.setText(sentence)
val words = new ArrayBuffer[UTF8String]
while (wi.next != BreakIterator.DONE) {
val word = sentence.substring(widx, wi.current)
widx = wi.current
if (Character.isLetterOrDigit(word.charAt(0))) words += UTF8String.fromString(word)
}
result += new GenericArrayData(words)
}
new GenericArrayData(result)
}
}
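// Illustrative sketch (not part of the original file): mirroring the usage
// example from the ExpressionDescription above, evaluated directly.
object SentencesExample {
  val result = Sentences(Literal("Hi there! Good morning.")).eval(EmptyRow)
  // result is a nested ArrayData: [["Hi", "there"], ["Good", "morning"]] —
  // punctuation tokens are dropped because getSentences only keeps words
  // whose first character is a letter or digit.
}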
| sureshthalamati/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/stringExpressions.scala | Scala | apache-2.0 | 77,194 |
// timber -- Copyright 2012-2021 -- Justin Patterson
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.scalawag.timber.backend.dispatcher.configuration.dsl
import org.scalawag.timber.api.Tag
import org.scalawag.timber.backend.dispatcher.EntryFacets
object tagged {
case class TaggedCondition private[dsl] (val tag: Tag) extends Condition {
override def accepts(entryFacets: EntryFacets): Option[Boolean] = entryFacets.tags.map(_.contains(tag))
override val toString = "tagged(%s)".format(tag)
}
def apply(t: Tag) = new TaggedCondition(t)
}
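// Illustrative sketch (hypothetical tag, not part of the original file),
// assuming Tag can be mixed into a singleton as timber tags usually are.
// Conditions built by this factory answer Some(true/false) only when the
// entry's tags are known (EntryFacets.tags is an Option), and None otherwise.
object taggedExample {
  object AuditTag extends Tag
  val condition = tagged(AuditTag)
  // condition.accepts(facets) == Some(true) iff the facets carry AuditTag;
  // condition.toString == "tagged(AuditTag)" (modulo the Tag's own toString)
}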
| scalawag/timber | timber-backend/src/main/scala/org/scalawag/timber/backend/dispatcher/configuration/dsl/tagged.scala | Scala | apache-2.0 | 1,074 |
/**
*
*/
package ru.kfu.itis.issst.corpus
import java.io.File
import org.apache.commons.lang3.builder.ToStringBuilder
import org.apache.commons.lang3.builder.ToStringStyle
/**
* @author Rinat Gareev (Kazan Federal University)
*
*/
class AnnoDoc(val id: String, val annotatedBy: String,
val txtFile: File, val annFile: File,
val errorTag: Option[String] = None) {
override def toString: String = new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE)
.append(id).append("annotatedBy", annotatedBy)
.append("errorTag", errorTag)
.toString()
}
| rgareev/corpus-utils | corpus-utils-scala/src/main/scala/ru/kfu/itis/issst/corpus/AnnoDoc.scala | Scala | mit | 573 |
package at.logic.gapt.expr
import at.logic.gapt.proofs.Sequent
import at.logic.gapt.utils.Not
trait Replaceable[-I, +O] {
def replace( obj: I, p: PartialFunction[Expr, Expr] ): O
def names( obj: I ): Set[VarOrConst]
}
trait ClosedUnderReplacement[T] extends Replaceable[T, T]
object Replaceable {
private object exprReplacer extends ClosedUnderReplacement[Expr] {
def replace( term: Expr, map: PartialFunction[Expr, Expr] ): Expr =
term match {
case _ if map isDefinedAt term => map( term )
// special case polymorphic constants so that we can do type-changing replacements
// but only if the user doesn't specify any replacement for the logical constants
case Eq( s, t ) if !( map isDefinedAt EqC( s.ty ) ) =>
Eq( replace( s, map ), replace( t, map ) )
case All( x, t ) if !( map isDefinedAt ForallC( x.ty ) ) =>
All( replace( x, map ).asInstanceOf[Var], replace( t, map ) )
case Ex( x, t ) if !( map isDefinedAt ExistsC( x.ty ) ) =>
Ex( replace( x, map ).asInstanceOf[Var], replace( t, map ) )
case App( s, t ) =>
App( replace( s, map ), replace( t, map ) )
case Abs( x, t ) =>
Abs( replace( x, map ).asInstanceOf[Var], replace( t, map ) )
case _ => term
}
def names( term: Expr ): Set[VarOrConst] =
constants( term ).toSet[VarOrConst] union variables( term ).toSet
}
private object formulaReplacer extends ClosedUnderReplacement[Formula] {
override def replace( obj: Formula, p: PartialFunction[Expr, Expr] ): Formula =
exprReplacer.replace( obj, p ).asInstanceOf[Formula]
def names( obj: Formula ) = exprReplacer.names( obj )
}
implicit def exprReplaceable[I <: Expr]( implicit notAFormula: Not[I <:< Formula] ): Replaceable[I, Expr] = exprReplacer
implicit def formulaReplaceable[I <: Formula]( implicit notAnAtom: Not[I <:< Atom] ): Replaceable[I, Formula] = formulaReplacer
implicit object holAtomReplaceable extends ClosedUnderReplacement[Atom] {
override def replace( obj: Atom, p: PartialFunction[Expr, Expr] ): Atom =
exprReplacer.replace( obj, p ).asInstanceOf[Atom]
def names( obj: Atom ) = exprReplacer.names( obj )
}
implicit object substitutionReplaceable extends ClosedUnderReplacement[Substitution] {
def replace( subst: Substitution, p: PartialFunction[Expr, Expr] ): Substitution =
Substitution( for ( ( l, r ) <- subst.map ) yield TermReplacement( l, p ).asInstanceOf[Var] -> TermReplacement( r, p ) )
def names( obj: Substitution ) =
obj.map.keySet ++ obj.map.values flatMap { containedNames( _ ) }
}
implicit object definitionReplaceable extends ClosedUnderReplacement[Definition] {
def replace( definition: Definition, p: PartialFunction[Expr, Expr] ): Definition =
Definition( TermReplacement( definition.what, p ).asInstanceOf[Const], TermReplacement( definition.by, p ) )
def names( obj: Definition ) =
Set[VarOrConst]( obj.what ) union exprReplacer.names( obj.by )
}
implicit def sequentReplaceable[I, O]( implicit ev: Replaceable[I, O] ): Replaceable[Sequent[I], Sequent[O]] =
new Replaceable[Sequent[I], Sequent[O]] {
override def replace( obj: Sequent[I], p: PartialFunction[Expr, Expr] ) =
obj.map { TermReplacement( _, p ) }
def names( obj: Sequent[I] ) = obj.elements flatMap { containedNames( _ ) } toSet
}
implicit def seqReplaceable[I, O]( implicit ev: Replaceable[I, O] ): Replaceable[Seq[I], Seq[O]] =
new Replaceable[Seq[I], Seq[O]] {
override def replace( obj: Seq[I], p: PartialFunction[Expr, Expr] ) =
obj.map { TermReplacement( _, p ) }
def names( obj: Seq[I] ) = obj flatMap { containedNames( _ ) } toSet
}
implicit def setReplaceable[I, O]( implicit ev: Replaceable[I, O] ): Replaceable[Set[I], Set[O]] =
new Replaceable[Set[I], Set[O]] {
override def replace( obj: Set[I], p: PartialFunction[Expr, Expr] ) =
obj.map { TermReplacement( _, p ) }
def names( obj: Set[I] ) = obj flatMap { containedNames( _ ) }
}
implicit def optionReplaceable[I, O]( implicit ev: Replaceable[I, O] ): Replaceable[Option[I], Option[O]] =
new Replaceable[Option[I], Option[O]] {
override def replace( obj: Option[I], p: PartialFunction[Expr, Expr] ) =
obj.map { TermReplacement( _, p ) }
def names( obj: Option[I] ) = obj.toSet[I] flatMap { containedNames( _ ) }
}
implicit def tupleReplaceable[I1, I2, O1, O2]( implicit ev1: Replaceable[I1, O1], ev2: Replaceable[I2, O2] ): Replaceable[( I1, I2 ), ( O1, O2 )] =
new Replaceable[( I1, I2 ), ( O1, O2 )] {
override def replace( obj: ( I1, I2 ), p: PartialFunction[Expr, Expr] ): ( O1, O2 ) =
( ev1.replace( obj._1, p ), ev2.replace( obj._2, p ) )
def names( obj: ( I1, I2 ) ) = containedNames( obj._1 ) union containedNames( obj._2 )
}
implicit def mapReplaceable[I1, I2, O1, O2]( implicit ev1: Replaceable[I1, O1], ev2: Replaceable[I2, O2] ): Replaceable[Map[I1, I2], Map[O1, O2]] =
new Replaceable[Map[I1, I2], Map[O1, O2]] {
override def replace( obj: Map[I1, I2], p: PartialFunction[Expr, Expr] ): Map[O1, O2] =
obj.map( TermReplacement( _, p ) )
def names( obj: Map[I1, I2] ) = containedNames( obj.toSeq )
}
}
/**
* A term replacement homomorphically extends a partial function on lambda expressions to all lambda expressions.
*
* This is done on a "best effort" basis. Replacing constants by ground terms of the same type will usually work, anything beyond that might or might not work.
*/
object TermReplacement {
def apply( term: Expr, what: Expr, by: Expr ): Expr =
apply( term, Map( what -> by ) )
def apply( f: Formula, what: Expr, by: Expr ): Formula =
apply( f, Map( what -> by ) )
def apply[I, O]( obj: I, p: PartialFunction[Expr, Expr] )( implicit ev: Replaceable[I, O] ): O =
ev.replace( obj, p )
/**
* Performs capture-avoiding term replacement.
*
* If a constant or variable occurs both in the range of partialMap
* and obj, we rename the occurrence in obj first to something else.
*/
def hygienic[I, O]( obj: I, partialMap: Map[Const, Expr] )( implicit ev: Replaceable[I, O] ): O = {
val namesInObj = containedNames( obj )
val namesInRange = partialMap.values.flatMap { containedNames( _ ) }.toSet
val needToRename = namesInObj intersect namesInRange -- partialMap.keySet
val nameGen = rename.awayFrom( namesInObj ++ namesInRange ++ partialMap.keySet )
val renaming = for ( n <- needToRename ) yield n -> nameGen.fresh( n )
TermReplacement( obj, partialMap ++ renaming toMap )
}
}
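// Illustrative sketch (hypothetical constants, not part of the original file):
// the "safe" case described above — replacing a constant by another ground
// term of the same type inside a formula.
object TermReplacementExample {
  private val c = Const( "c", Ti )
  private val d = Const( "d", Ti )
  private val x = Var( "x", Ti )
  val replaced: Formula = TermReplacement( Eq( c, x ), c, d ) // == Eq( d, x )
}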
object containedNames {
def apply[I, O]( obj: I )( implicit ev: Replaceable[I, O] ): Set[VarOrConst] =
ev.names( obj )
}
| gebner/gapt | core/src/main/scala/at/logic/gapt/expr/TermReplacement.scala | Scala | gpl-3.0 | 6,831 |
package models
import java.util.Date
import anorm.SqlParser._
import anorm._
/**
* Created by anand on 18/8/15.
*/
trait DAOParsers {
// -- Parsers
/**
* Parse an User from a ResultSet
*/
val user = {
get[Long]("users.id") ~
get[String]("users.name") ~
get[String]("users.designation") ~
get[String]("users.email") ~
get[String]("users.password") ~
get[Boolean]("users.is_admin") map {
case id ~ name ~ designation ~ email ~ password ~ isAdmin => User(id, name, designation, email, password, isAdmin)
}
}
/**
* Parse a Comment from a ResultSet
*/
val comment = {
get[Long]("comments.id") ~
get[Long]("comments.ticket_id") ~
get[String]("comments.comment") ~
get[Date]("comments.created_at") ~
user map {
case id ~ ticketId ~ comment ~ createdAt ~ user => Comment(id, user, ticketId, comment, createdAt)
}
}
/**
* Parse a customer from a ResultSet
*/
val customer = {
get[Long]("customers.id") ~
get[String]("customers.name") ~
get[String]("customers.email") ~
get[String]("customers.address") ~
get[String]("customers.contact") map {
case id ~ name ~ email ~ address ~ contact => Customer(id, name, email, address, contact)
}
}
/**
* Parse a ticket from a ResultSet
*/
val ticket = {
get[Long]("tickets.id") ~
get[String]("tickets.description") ~
get[String]("tickets.status") ~
get[String]("tickets.area") ~
get[Date]("tickets.created_at") ~
get[Date]("tickets.updated_at") ~
get[Long]("tickets.customer_id") ~
get[Long]("tickets.created_by") ~
get[Long]("tickets.assigned_to") map {
case id ~ description ~ status ~ area ~ createdAt ~ updatedAt ~ customerId ~ createdBy ~ assignedTo =>
Ticket(id, description, status, area, createdAt, updatedAt, customerId, createdBy, assignedTo)
}
}
}
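// Illustrative usage sketch (not part of the original file): anorm row parsers
// are applied by combining them with a query. The table name and the implicit
// connection are assumptions here, not taken from this repository.
object DAOParsersExample extends DAOParsers {
  def allUsers()(implicit connection: java.sql.Connection): List[User] =
    SQL("SELECT * FROM users").as(user.*)
}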
| anand-singh/csr-hotel-system | app/models/DAOParsers.scala | Scala | apache-2.0 | 1,940 |
package com.olegych.scastie.client
import com.olegych.scastie.api._
import play.api.libs.json._
object SnippetState {
implicit val formatSnippetState: OFormat[SnippetState] =
Json.format[SnippetState]
}
case class SnippetState(
snippetId: Option[SnippetId],
loadSnippet: Boolean,
scalaJsContent: Option[String],
)
object ScastieState {
def default(isEmbedded: Boolean): ScastieState = {
ScastieState(
view = View.Editor,
isRunning = false,
statusStream = None,
progressStream = None,
modalState =
if (isEmbedded) ModalState.allClosed
else ModalState.default,
isDarkTheme = false,
isDesktopForced = false,
isPresentationMode = false,
showLineNumbers = true,
consoleState = ConsoleState.default,
inputsHasChanged = false,
snippetState = SnippetState(
snippetId = None,
loadSnippet = true,
scalaJsContent = None,
),
user = None,
attachedDoms = AttachedDoms(Map()),
inputs = Inputs.default,
outputs = Outputs.default,
status = StatusState.empty,
isEmbedded = isEmbedded
)
}
implicit val dontSerializeAttachedDoms: Format[AttachedDoms] =
dontSerialize[AttachedDoms](AttachedDoms(Map()))
implicit val dontSerializeStatusState: Format[StatusState] =
dontSerialize[StatusState](StatusState.empty)
implicit val dontSerializeEventStream: Format[EventStream[StatusProgress]] =
dontSerializeOption[EventStream[StatusProgress]]
implicit val dontSerializeProgressStream: Format[EventStream[SnippetProgress]] =
dontSerializeOption[EventStream[SnippetProgress]]
implicit val formatScastieState: OFormat[ScastieState] =
Json.format[ScastieState]
}
case class ScastieState(
view: View,
isRunning: Boolean,
statusStream: Option[EventStream[StatusProgress]],
progressStream: Option[EventStream[SnippetProgress]],
modalState: ModalState,
isDarkTheme: Boolean,
isDesktopForced: Boolean,
isPresentationMode: Boolean,
showLineNumbers: Boolean,
consoleState: ConsoleState,
inputsHasChanged: Boolean,
snippetState: SnippetState,
user: Option[User],
attachedDoms: AttachedDoms,
inputs: Inputs,
outputs: Outputs,
status: StatusState,
isEmbedded: Boolean = false,
transient: Boolean = false,
) {
def snippetId: Option[SnippetId] = snippetState.snippetId
def loadSnippet: Boolean = snippetState.loadSnippet
def copyAndSave(
attachedDoms: AttachedDoms = attachedDoms,
view: View = view,
isRunning: Boolean = isRunning,
statusStream: Option[EventStream[StatusProgress]] = statusStream,
progressStream: Option[EventStream[SnippetProgress]] = progressStream,
modalState: ModalState = modalState,
isDarkTheme: Boolean = isDarkTheme,
isPresentationMode: Boolean = isPresentationMode,
isDesktopForced: Boolean = isDesktopForced,
showLineNumbers: Boolean = showLineNumbers,
consoleState: ConsoleState = consoleState,
inputsHasChanged: Boolean = inputsHasChanged,
snippetId: Option[SnippetId] = snippetId,
loadSnippet: Boolean = loadSnippet,
scalaJsContent: Option[String] = snippetState.scalaJsContent,
user: Option[User] = user,
inputs: Inputs = inputs,
outputs: Outputs = outputs,
status: StatusState = status,
transient: Boolean = transient,
): ScastieState = {
val state0 =
copy(
view = view,
isRunning = isRunning,
statusStream = statusStream,
progressStream = progressStream,
modalState = modalState,
isDarkTheme = isDarkTheme,
isDesktopForced = isDesktopForced,
isPresentationMode = isPresentationMode,
showLineNumbers = showLineNumbers,
consoleState = consoleState,
inputsHasChanged = inputsHasChanged,
snippetState = SnippetState(
snippetId = snippetId,
loadSnippet = loadSnippet,
scalaJsContent = scalaJsContent,
),
user = user,
attachedDoms = attachedDoms,
inputs = inputs.copy(
isShowingInUserProfile = false,
forked = None
),
outputs = outputs,
status = status,
isEmbedded = isEmbedded,
transient = transient,
)
if (!isEmbedded && !transient) {
LocalStorage.save(state0)
}
state0
}
private def coalesceUpdates(update: ScastieState => ScastieState) = {
if (transient) update(this) else update(this.copy(transient = true)).copyAndSave(transient = false)
}
def isBuildDefault: Boolean = inputs.isDefault
def isClearable: Boolean =
outputs.isClearable
def run(snippetId: SnippetId): ScastieState = {
clearOutputs.resetScalajs
.setRunning(true)
.logSystem("Sending task to the server.")
.setSnippetId(snippetId)
}
def setRunning(isRunning: Boolean): ScastieState = {
val openConsole = consoleState.consoleHasUserOutput || outputs.sbtError
copyAndSave(isRunning = isRunning).autoOpen(openConsole)
}
def toggleTheme: ScastieState =
copyAndSave(isDarkTheme = !isDarkTheme)
def setTheme(dark: Boolean): ScastieState =
copyAndSave(isDarkTheme = dark)
def toggleLineNumbers: ScastieState =
copyAndSave(showLineNumbers = !showLineNumbers)
def togglePresentationMode: ScastieState =
copyAndSave(isPresentationMode = !isPresentationMode)
def toggleWorksheetMode: ScastieState =
copyAndSave(
inputs = inputs.copy(_isWorksheetMode = !inputs.isWorksheetMode),
inputsHasChanged = true
)
def toggleHelpModal: ScastieState =
copyAndSave(
modalState = modalState.copy(isHelpModalClosed = !modalState.isHelpModalClosed)
)
def openHelpModal: ScastieState =
copyAndSave(modalState = modalState.copy(isHelpModalClosed = false))
def closeHelpModal: ScastieState =
copyAndSave(modalState = modalState.copy(isHelpModalClosed = true))
def openResetModal: ScastieState =
copyAndSave(modalState = modalState.copy(isResetModalClosed = false))
def closeResetModal: ScastieState =
copyAndSave(modalState = modalState.copy(isResetModalClosed = true))
def openNewSnippetModal: ScastieState =
copyAndSave(modalState = modalState.copy(isNewSnippetModalClosed = false))
def closeNewSnippetModal: ScastieState =
copyAndSave(modalState = modalState.copy(isNewSnippetModalClosed = true))
def openShareModal(snippetId: Option[SnippetId]): ScastieState =
copyAndSave(modalState = modalState.copy(shareModalSnippetId = snippetId))
def closeShareModal: ScastieState =
copyAndSave(modalState = modalState.copy(shareModalSnippetId = None))
def openEmbeddedModal: ScastieState =
copyAndSave(modalState = modalState.copy(isEmbeddedClosed = false))
def closeEmbeddedModal: ScastieState =
copyAndSave(modalState = modalState.copy(isEmbeddedClosed = true))
def forceDesktop: ScastieState = copyAndSave(isDesktopForced = true)
def openConsole: ScastieState = {
copyAndSave(
consoleState = consoleState.copy(
consoleIsOpen = true,
userOpenedConsole = true
)
)
}
def closeConsole: ScastieState = {
copyAndSave(
consoleState = consoleState.copy(
consoleIsOpen = false,
userOpenedConsole = false
)
)
}
def autoOpen(isOpen: Boolean): ScastieState = {
copyAndSave(
consoleState = consoleState.copy(
consoleIsOpen = isOpen || consoleState.consoleIsOpen
)
)
}
def toggleConsole: ScastieState = {
copyAndSave(
consoleState =
if (consoleState.consoleIsOpen)
consoleState.copy(
consoleIsOpen = false,
userOpenedConsole = false
)
else
consoleState.copy(
consoleIsOpen = true,
userOpenedConsole = true
)
)
}
def setUserOutput: ScastieState = {
copyAndSave(consoleState = consoleState.copy(consoleHasUserOutput = true))
}
def setLoadSnippet(value: Boolean): ScastieState =
copy(snippetState = snippetState.copy(loadSnippet = value))
def setUser(user: Option[User]): ScastieState =
copyAndSave(user = user)
def setCode(code: String): ScastieState = {
if (inputs.code != code) {
copyAndSave(
inputs = inputs.copy(code = code),
inputsHasChanged = true
)
} else {
this
}
}
def setInputs(inputs: Inputs): ScastieState =
copyAndSave(
inputs = inputs
)
def setSbtConfigExtra(config: String): ScastieState =
copyAndSave(
inputs = inputs.copy(sbtConfigExtra = config),
inputsHasChanged = true
)
def setChangedInputs: ScastieState =
copyAndSave(inputsHasChanged = true)
def setCleanInputs: ScastieState =
copyAndSave(inputsHasChanged = false)
def setView(newView: View): ScastieState =
copyAndSave(view = newView)
def setTarget(target: ScalaTarget): ScastieState =
copyAndSave(
inputs = inputs.modifyConfig(_.copy(target = target)),
inputsHasChanged = true
)
def clearDependencies: ScastieState =
copyAndSave(
inputs = inputs.clearDependencies,
inputsHasChanged = true
)
def addScalaDependency(scalaDependency: ScalaDependency, project: Project): ScastieState = {
val newInputs = inputs.addScalaDependency(scalaDependency, project)
copyAndSave(
inputs = newInputs,
inputsHasChanged = newInputs != inputs,
)
}
def removeScalaDependency(scalaDependency: ScalaDependency): ScastieState =
copyAndSave(
inputs = inputs.removeScalaDependency(scalaDependency),
inputsHasChanged = true
)
def updateDependencyVersion(scalaDependency: ScalaDependency, version: String): ScastieState = {
copyAndSave(
inputs = inputs.updateScalaDependency(scalaDependency, version),
inputsHasChanged = true
)
}
def scalaJsScriptLoaded: ScastieState = copyAndSave(scalaJsContent = None)
def resetScalajs: ScastieState = copyAndSave(attachedDoms = AttachedDoms(Map()))
def clearOutputs: ScastieState = {
copyAndSave(
outputs = Outputs.default,
consoleState = consoleState.copy(
consoleHasUserOutput = false
)
)
}
def clearOutputsPreserveConsole: ScastieState = {
copyAndSave(
outputs = Outputs.default.copy(consoleOutputs = outputs.consoleOutputs),
)
}
def closeModals: ScastieState =
copyAndSave(modalState = ModalState.allClosed)
def setRuntimeError(runtimeError: Option[RuntimeError]): ScastieState =
if (runtimeError.isEmpty) this
else copyAndSave(outputs = outputs.copy(runtimeError = runtimeError))
def setSbtError(err: Boolean): ScastieState =
copyAndSave(outputs = outputs.copy(sbtError = err))
def logOutput(line: Option[ProcessOutput], wrap: ProcessOutput => ConsoleOutput): ScastieState = {
line match {
case Some(l) =>
copyAndSave(
outputs = outputs.copy(
consoleOutputs = outputs.consoleOutputs ++ Vector(wrap(l))
)
)
case _ => this
}
}
def logSystem(line: String): ScastieState = {
copyAndSave(
outputs = outputs.copy(
consoleOutputs = outputs.consoleOutputs ++ Vector(
ConsoleOutput.ScastieOutput(line)
)
)
)
}
def addProgress(progress: SnippetProgress): ScastieState = coalesceUpdates { self =>
val state = self
.addOutputs(progress.compilationInfos, progress.instrumentations)
.logOutput(progress.userOutput, ConsoleOutput.UserOutput.apply _)
.logOutput(progress.sbtOutput, ConsoleOutput.SbtOutput.apply _)
.setForcedProgramMode(progress.isForcedProgramMode)
.setRuntimeError(progress.runtimeError)
.setSbtError(progress.isSbtError)
.setRunning(!progress.isDone)
.copyAndSave(scalaJsContent = progress.scalaJsContent.orElse(self.snippetState.scalaJsContent))
if (progress.userOutput.exists(_.line.nonEmpty)) state.setUserOutput
else state
}
def addStatus(statusUpdate: StatusProgress): ScastieState = {
statusUpdate match {
case StatusProgress.KeepAlive =>
this
case StatusProgress.Sbt(sbtRunners) =>
copyAndSave(status = status.copy(sbtRunners = Some(sbtRunners)))
}
}
def removeStatus: ScastieState = {
copyAndSave(status = StatusState.empty)
}
def setProgresses(progresses: List[SnippetProgress]): ScastieState = coalesceUpdates { self =>
progresses.foldLeft(self) {
case (state, progress) => state.addProgress(progress)
}
}
def setSnippetId(snippetId: SnippetId): ScastieState = copyAndSave(snippetId = Some(snippetId))
def clearSnippetId: ScastieState = copyAndSave(snippetId = None)
private def info(message: String) = Problem(Info, None, message)
def setForcedProgramMode(forcedProgramMode: Boolean): ScastieState = {
if (!forcedProgramMode) this
else {
copyAndSave(
outputs = outputs.copy(
compilationInfos = outputs.compilationInfos +
info(
"You don't need a main method (or extends Scastie) in Worksheet Mode"
)
)
)
}
}
def addOutputs(compilationInfos: List[Problem], instrumentations: List[Instrumentation]): ScastieState = {
def topDef(problem: Problem): Boolean = {
problem.severity == Error &&
problem.message == "expected class or object definition"
}
val useWorksheetModeTip =
if (compilationInfos.exists(ci => topDef(ci)))
if (inputs.target.hasWorksheetMode)
Set(
info(
"""|It seems you're writing code without an enclosing class/object.
|Switch to Worksheet mode if you want to use scastie more like a REPL.""".stripMargin
)
)
else
Set(
info(
"""|It seems you're writing code without an enclosing class/object.
|This configuration does not support Worksheet mode.""".stripMargin
)
)
else Set()
copyAndSave(
outputs = outputs.copy(
compilationInfos = outputs.compilationInfos ++ compilationInfos.toSet ++ useWorksheetModeTip,
instrumentations = outputs.instrumentations ++ instrumentations.toSet
)
)
}
override def toString: String = Json.toJson(this).toString()
}
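// Illustrative sketch (not part of the original file): the copy-and-save API
// is meant for fluent chaining; every step returns a new state and, outside
// embedded/transient mode, persists it to LocalStorage.
object ScastieStateExample {
  val state = ScastieState
    .default(isEmbedded = false)
    .setCode("""println("hello")""")
    .toggleConsole
    .logSystem("ready to run")
}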
| scalacenter/scastie | client/src/main/scala/com.olegych.scastie.client/ScastieState.scala | Scala | apache-2.0 | 14,447 |
/**
* Created by Andrea Collamati <[email protected]> on 07/12/2019.
*/
package com.getjenny.starchat.analyzer.atoms
import com.getjenny.analyzer.atoms.{AbstractAtomic, ExceptionAtomic}
import com.getjenny.analyzer.entities.{AnalyzersDataInternal, Result}
import com.getjenny.analyzer.util.ComparisonOperators
import java.time.format.DateTimeFormatter
import java.time.{Duration, LocalDateTime, ZoneId}
/**
* Atomic to compare dates
*
* It accepts these arguments:
 * Arg0 = inputDate in ISO_LOCAL_DATE_TIME format
 * (https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html#ISO_LOCAL_DATE_TIME)
 * Arg1 = operator ("LessOrEqual","Less","Greater","GreaterOrEqual","Equal")
 * Arg2 = shift, a delta time interval expressed in the ISO-8601 duration format PnDTnHnMn.nS,
 * with days considered to be exactly 24 hours (https://en.wikipedia.org/wiki/ISO_8601#Durations)
 * Arg3 = time zone (e.g. Europe/Helsinki). Used only when compareDate is the current time
 * Arg4 = compareDate in ISO_LOCAL_DATE_TIME format. If "", the current date and time + shift (Arg2) is used
 * The atomic returns the boolean result of comparing inputDate with (compareDate + shift)
 *
 * Ex: CheckDate("2019-12-07T11:50:55", "Greater", "P7D", "Europe/Helsinki", "") checks whether
 * 7th December 2019 11:50:55 > now in Europe/Helsinki + 7 days
*
*/
class CheckDateAtomic(val arguments: List[String],
restrictedArgs: Map[String, String]) extends AbstractAtomic {
val atomName: String = "checkDate"
val inputDate: LocalDateTime = arguments.headOption match {
case Some(t) => LocalDateTime.parse(t, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
case _ => throw ExceptionAtomic("CheckDateAtomic: arg 0/4 must be a time")
}
val operator: String = arguments.lift(1) match {
case Some(t) => t
case _ => throw ExceptionAtomic("CheckDateAtomic: arg 1/4 must be an operator string")
}
val shift: Duration = arguments.lift(2) match {
case Some(t) => Duration.parse(t)
case _ => throw ExceptionAtomic("CheckDateAtomic: arg 2/4 must be a duration")
}
val timeZone: ZoneId = arguments.lift(3) match {
case Some(t) => ZoneId.of(t)
case _ => throw ExceptionAtomic("CheckDateAtomic: arg 3/4 must be a timezone")
}
val timeToBeComparedString: String = arguments.lift(4) match {
case Some(t) => t
case _ => ""
}
  override def toString: String = "CheckDate(\"" + arguments + "\")"
val isEvaluateNormalized: Boolean = true
def evaluate(query: String, data: AnalyzersDataInternal = AnalyzersDataInternal()): Result = {
    val timeToBeCompared: LocalDateTime =
      if (timeToBeComparedString.isEmpty) LocalDateTime.now(timeZone).plusNanos(shift.toNanos)
      else LocalDateTime.parse(timeToBeComparedString, DateTimeFormatter.ISO_LOCAL_DATE_TIME).plusNanos(shift.toNanos)
if (ComparisonOperators.compare(inputDate.compareTo(timeToBeCompared), 0, operator))
Result(score = 1.0)
else
Result(score = 0.0)
}
}
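// Illustrative sketch (hypothetical argument values, not part of the original
// file): with an empty Arg4, the comparison target is now(Europe/Helsinki)
// plus the P7D shift, so the score is 1.0 only while the input date lies more
// than a week in the future.
object CheckDateAtomicExample {
  val atomic = new CheckDateAtomic(
    List("2019-12-07T11:50:55", "Greater", "P7D", "Europe/Helsinki", ""),
    restrictedArgs = Map.empty)
  val result: Result = atomic.evaluate("query text is ignored here")
}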
| GetJenny/starchat | src/main/scala/com/getjenny/starchat/analyzer/atoms/CheckDateAtomic.scala | Scala | gpl-2.0 | 3,062 |
package cilib
trait NumericTo[@specialized A] {
def fromDouble(a: Double): A
}
object NumericTo {
@inline def apply[A](implicit N: NumericTo[A]) = N
implicit val NumericToDouble: NumericTo[Double] =
new NumericTo[Double] {
def fromDouble(a: Double): Double = a
}
}
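// Illustrative sketch (not part of the original file): summoning the Double
// instance through the `apply` summoner defined above.
object NumericToExample {
  val pi: Double = NumericTo[Double].fromDouble(3.14) // == 3.14
}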
| cirg-up/cilib | core/src/main/scala/cilib/NumericTo.scala | Scala | apache-2.0 | 288 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.repl
import java.io.BufferedReader
import scala.tools.nsc.Settings
import scala.tools.nsc.interpreter.{ILoop, JPrintWriter}
import scala.tools.nsc.util.stringFromStream
import scala.util.Properties.{javaVersion, javaVmName, versionString}
/**
* A Spark-specific interactive shell.
*/
class SparkILoop(in0: Option[BufferedReader], out: JPrintWriter)
extends ILoop(in0, out) {
def this(in0: BufferedReader, out: JPrintWriter) = this(Some(in0), out)
def this() = this(None, new JPrintWriter(Console.out, true))
val initializationCommands: Seq[String] = Seq(
"""
@transient val spark = if (org.apache.spark.repl.Main.sparkSession != null) {
org.apache.spark.repl.Main.sparkSession
} else {
org.apache.spark.repl.Main.createSparkSession()
}
@transient val sc = {
val _sc = spark.sparkContext
if (_sc.getConf.getBoolean("spark.ui.reverseProxy", false)) {
val proxyUrl = _sc.getConf.get("spark.ui.reverseProxyUrl", null)
if (proxyUrl != null) {
println(
s"Spark Context Web UI is available at ${proxyUrl}/proxy/${_sc.applicationId}")
} else {
println(s"Spark Context Web UI is available at Spark Master Public URL")
}
} else {
_sc.uiWebUrl.foreach {
webUrl => println(s"Spark context Web UI available at ${webUrl}")
}
}
println("Spark context available as 'sc' " +
s"(master = ${_sc.master}, app id = ${_sc.applicationId}).")
println("Spark session available as 'spark'.")
_sc
}
""",
"import org.apache.spark.SparkContext._",
"import spark.implicits._",
"import spark.sql",
"import org.apache.spark.sql.functions._"
)
def initializeSpark() {
intp.beQuietDuring {
savingReplayStack { // remove the commands from session history.
initializationCommands.foreach(command)
}
}
}
/** Print a welcome message */
override def printWelcome() {
import org.apache.spark.SPARK_VERSION
echo("""Welcome to
____ __
/ __/__ ___ _____/ /__
    _\ \/ _ \/ _ `/ __/ '_/
   /___/ .__/\_,_/_/ /_/\_\   version %s
/_/
""".format(SPARK_VERSION))
val welcomeMsg = "Using Scala %s (%s, Java %s)".format(
versionString, javaVmName, javaVersion)
echo(welcomeMsg)
echo("Type in expressions to have them evaluated.")
echo("Type :help for more information.")
}
/** Available commands */
override def commands: List[LoopCommand] = standardCommands
/**
* We override `createInterpreter` because we need to initialize Spark *before* the REPL
* sees any files, so that the Spark context is visible in those files. This is a bit of a
* hack, but there isn't another hook available to us at this point.
*/
override def createInterpreter(): Unit = {
super.createInterpreter()
initializeSpark()
}
override def resetCommand(line: String): Unit = {
super.resetCommand(line)
initializeSpark()
echo("Note that after :reset, state of SparkSession and SparkContext is unchanged.")
}
override def replay(): Unit = {
initializeSpark()
super.replay()
}
}
object SparkILoop {
/**
* Creates an interpreter loop with default settings and feeds
* the given code to it as input.
*/
def run(code: String, sets: Settings = new Settings): String = {
import java.io.{ BufferedReader, StringReader, OutputStreamWriter }
stringFromStream { ostream =>
Console.withOut(ostream) {
val input = new BufferedReader(new StringReader(code))
val output = new JPrintWriter(new OutputStreamWriter(ostream), true)
val repl = new SparkILoop(input, output)
if (sets.classpath.isDefault) {
sets.classpath.value = sys.props("java.class.path")
}
repl process sets
}
}
}
  def run(lines: List[String]): String = run(lines.map(_ + "\n").mkString)
}
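// Illustrative sketch (not part of the original file): `run` feeds code to a
// fresh REPL and returns everything it printed. It boots a real SparkSession,
// so it is only meaningful with Spark (and its configuration) on the classpath.
object SparkILoopExample {
  def main(args: Array[String]): Unit =
    println(SparkILoop.run("spark.range(3).count()"))
}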
| bravo-zhang/spark | repl/scala-2.12/src/main/scala/org/apache/spark/repl/SparkILoop.scala | Scala | apache-2.0 | 4,790 |
package io.buoyant.router.http
import com.twitter.finagle.Service
import com.twitter.finagle.http.Fields.Via
import com.twitter.finagle.http.{HeaderMap, Request, Response, Status}
import com.twitter.util.Future
import io.buoyant.test.{Awaits, FunSuite}
class MaxCallDepthFilterTest extends FunSuite with Awaits {
implicit object HttpHeadersLike extends HeadersLike[HeaderMap] {
override def toSeq(headers: HeaderMap): Seq[(String, String)] = ???
override def contains(headers: HeaderMap, k: String): Boolean = ???
override def get(headers: HeaderMap, k: String): Option[String] = headers.get(k)
override def getAll(headers: HeaderMap, k: String): Seq[String] = ???
override def add(headers: HeaderMap, k: String, v: String): Unit = ???
override def set(headers: HeaderMap, k: String, v: String): Unit = ???
override def remove(headers: HeaderMap, key: String): Seq[String] = ???
}
implicit object HttpRequestLike extends RequestLike[Request, HeaderMap] {
override def headers(request: Request): HeaderMap = request.headerMap
}
def service(maxCallDepth: Int) = new MaxCallDepthFilter[Request, HeaderMap, Response](
maxCallDepth,
Via
).andThen(Service.mk[Request, Response](_ => Future.value(Response())))
test("passes through requests not exceeding max hops") {
val viaHeader = (1 to 10).map(v => s"hop $v").mkString(", ")
val req = Request()
req.headerMap.add(Via, viaHeader)
assert(await(service(10)(req)).status == Status.Ok)
}
test("stops requests exceeding max hops") {
val expectedMessage = "Maximum number of calls (9) has been exceeded. Please check for proxy loops."
val viaHeader = (1 to 10).map(v => s"hop $v").mkString(", ")
val req = Request()
req.headerMap.add(Via, viaHeader)
val exception = intercept[MaxCallDepthFilter.MaxCallDepthExceeded] {
await(service(9)(req))
}
assert(exception.getMessage == expectedMessage)
}
}
| linkerd/linkerd | router/base-http/src/test/scala/io/buoyant/router/http/MaxCallDepthFilterTest.scala | Scala | apache-2.0 | 1,958 |
/*
* Copyright (C) 2014 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package fr.geocites.indus
import fr.geocites.gugus._
import fr.geocites.gugus.balance._
import fr.geocites.gugus.structure.Network
import fr.geocites.simpuzzle.flatten
import scala.util.Random
import monocle._
case class State(step: Int, cities: Seq[City], network: Network, distanceMatrix: DistanceMatrix)
case class City(
population: Double,
wealth: Double,
district: String,
state: String,
districtCapital: Boolean,
stateCapital: Boolean)
trait Indus <: Gugus with SuperLinearInitialWealth {
type STATE = State
type CITY = City
def population = Lenser[CITY](_.population)
def wealth = Lenser[CITY](_.wealth)
def cities = Lenser[STATE](_.cities)
def step = Lenser[STATE](_.step)
def network = Lenser[STATE](_.network)
def distances = Lenser[STATE](_.distanceMatrix)
def arokatos = IndusFile.arokatos
def populations(date: Int) = IndusFile.populations(date)
def firstDate = IndusFile.firstDate
  def initialState(implicit rng: Random) = {
    val cities = initialCities
    State(0, cities, Network.full(cities.size), IndusFile.distanceMatrix)
  }
def initialCities(implicit rng: Random) = {
val initialPopulations = IndusFile.initialPopulations
val pop = initialPopulations.toSeq
val initialWealths = InitialWealth.rescaleWealth(pop.map(initialWealth), pop)
val cities =
for {
(_population,
_district,
_state,
_districtCapital,
_stateCapital,
_initialWealth) <- pop zip IndusFile.districts zip IndusFile.states zip IndusFile.districtCapitals zip IndusFile.stateCapitals zip initialWealths map (flatten)
} yield {
City(
population = _population,
district = _district,
state = _state,
districtCapital = _districtCapital,
stateCapital = _stateCapital,
wealth = _initialWealth
)
}
cities.take(IndusFile.nbCities).toVector
}
}
| ISCPIF/PSEExperiments | simpuzzle-src/models/indus/model/src/main/scala/fr/geocites/indus/Indus.scala | Scala | agpl-3.0 | 2,665 |
package io.github.datamoth.dm.api
package oozie
import java.io.File
case class Project(
meta: io.github.datamoth.dm.api.Project
, coordinators : List[Coordinator]
, databundles : List[Databundle]
, workflows : List[Workflow]
, datasets : List[Dataset]
, plugins : List[Plugin]
, errors : List[Error]
, warnings : List[Error]
, files : List[Location]
, hueUrl : Option[String] = None
, deploy : Option[DeployInfo] = None
, _workDir : File = null
) {
def withCoordinators(coordinators: List[Coordinator]): Project = copy(coordinators = coordinators)
def withDeploy(di: DeployInfo): Project = copy(deploy = Some(di))
def withErrors(errors: List[Error]): Project = copy(errors = errors)
}
| datamoth/datamoth | datamot/src/main/scala/io/github/datamoth/dm/api/oozie/Project.scala | Scala | apache-2.0 | 718 |
// This is free and unencumbered software released into the public domain.
//
// Anyone is free to copy, modify, publish, use, compile, sell, or
// distribute this software, either in source code form or as a compiled
// binary, for any purpose, commercial or non-commercial, and by any
// means.
//
// In jurisdictions that recognize copyright laws, the author or authors
// of this software dedicate any and all copyright interest in the
// software to the public domain. We make this dedication for the benefit
// of the public at large and to the detriment of our heirs and
// successors. We intend this dedication to be an overt act of
// relinquishment in perpetuity of all present and future rights to this
// software under copyright law.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
//
// For more information, please refer to <http://unlicense.org/>
package com.pasviegas.shoushiling.core.engine
import com.pasviegas.shoushiling.core.GamePlay.{Match, Player, Throw}
import org.scalatest.{FlatSpec, MustMatchers}
class GameOutcomeTest extends FlatSpec with MustMatchers {
import com.pasviegas.shoushiling.core._
"Game Rule: Rock crushes Scissors" must "have Rock as winner and Scissors as loser" in {
Win(Match(Player("1", Throw(Rock)) -> Player("2", Throw(Scissors))), Some(Player("1", Throw(Rock))))
.toString must be("Player 1 wins with Rock")
}
}
| pasviegas/shoushiling | core/src/test/scala/com/pasviegas/shoushiling/core/engine/GameOutcomeTest.scala | Scala | unlicense | 1,801 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.crossdata.streaming.kafka
import com.stratio.crossdata.streaming.constants.KafkaConstants
import kafka.serializer.StringDecoder
import org.apache.spark.sql.crossdata.models.KafkaOptionsModel
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka.KafkaUtils
import KafkaConstants._
class KafkaInput(options: KafkaOptionsModel) {
def createStream(ssc: StreamingContext): DStream[(String, String)] = {
val kafkaParams = options.additionalOptions
val connection = Map(getConnection)
val groupId = Map(getGroupId)
KafkaUtils.createStream[String, String, StringDecoder, StringDecoder](
ssc,
connection ++ groupId ++ kafkaParams,
getTopics,
storageLevel(options.storageLevel))
}
private[streaming] def getConnection : (String, String) = {
val connectionChain = (
for(zkConnection <- options.connection.zkConnection) yield (s"${zkConnection.host}:${zkConnection.port}")
).mkString(",")
(ZookeeperConnectionKey, if(connectionChain.isEmpty) s"$DefaultHost:$DefaultConsumerPort" else connectionChain)
}
private[streaming] def getGroupId : (String, String) = (GroupIdKey, options.groupId)
private[streaming] def getTopics : Map[String, Int] = {
if (options.topics.isEmpty) {
throw new IllegalStateException(s"Invalid configuration, topics must be declared.")
} else {
options.topics.map(topicModel => (topicModel.name, topicModel.numPartitions)).toMap
}
}
private[streaming] def storageLevel(sparkStorageLevel: String): StorageLevel = {
StorageLevel.fromString(sparkStorageLevel)
}
} | darroyocazorla/crossdata | streaming/src/main/scala/com/stratio/crossdata/streaming/kafka/KafkaInput.scala | Scala | apache-2.0 | 2,358 |
package scan
import java.io.FileNotFoundException
import java.io.IOException
import java.nio.file._
import cats._
import cats.data._
import cats.implicits._
import org.atnos.eff._
import org.atnos.eff.all._
import org.atnos.eff.syntax.all._
import org.atnos.eff.addon.monix._
import org.atnos.eff.addon.monix.task._
import org.atnos.eff.syntax.addon.monix.task._
import org.specs2._
import scala.collection.immutable.SortedSet
import scala.concurrent.duration._
import monix.eval._
import monix.execution.Scheduler.Implicits.global
class ScannerSpec extends mutable.Specification {
case class MockFilesystem(directories: Map[Directory, List[FilePath]], fileSizes: Map[File, Long]) extends Filesystem {
def length(file: File) = fileSizes.getOrElse(file, throw new IOException())
def listFiles(directory: Directory) = directories.getOrElse(directory, throw new IOException())
def filePath(path: String): FilePath =
if (directories.keySet.contains(Directory(path)))
Directory(path)
else if (fileSizes.keySet.contains(File(path)))
File(path)
else
throw new FileNotFoundException(path)
}
val base = Directory("base")
val linkTarget = File(s"/somewhere/else/7.txt")
val base1 = File(s"${base.path}/1.txt")
val baseLink = Symlink(s"${base.path}/7.txt", linkTarget)
val subdir = Directory(s"${base.path}/subdir")
val sub2 = File(s"${subdir.path}/2.txt")
val subLink = Symlink(s"${subdir.path}/7.txt", linkTarget)
val directories = Map(
base -> List(subdir, base1, baseLink),
subdir -> List(sub2, subLink)
)
val fileSizes = Map(base1 -> 1L, sub2 -> 2L, linkTarget -> 7L)
val fs = MockFilesystem(directories, fileSizes)
type R = Fx.fx5[Task, Reader[Filesystem, ?], Reader[ScanConfig, ?], Writer[Log, ?], State[Set[FilePath], ?]]
def run[T](program: Eff[R, T], fs: Filesystem) =
program.runReader(ScanConfig(2)).runReader(fs).evalStateZero[Set[FilePath]].taskAttempt.runWriter[Log].runAsync.runSyncUnsafe(3.seconds)
val expected = Right(new PathScan(SortedSet(FileSize(linkTarget, 7), FileSize(sub2, 2)), 10, 3))
val (actual, logs) = run(Scanner.pathScan[R](base), fs)
"Report Format" ! {actual.mustEqual(expected)}
}
| benhutchison/GettingWorkDoneWithExtensibleEffects | solutions/exerciseState/src/test/scala/scan/ScannerSpec.scala | Scala | apache-2.0 | 2,224 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogx.runtime.checkpoint
import cogx.compiler.parser.syntaxtree.UnpipelinedColorSensor
import cogx.platform.types.FieldType
/** Similar to the UnpipelinedTestSensor, but simpler because it needs only the factory object.
*
* @author Dick Carter
*/
object SimpleUnpipelinedTestColorSensor {
/** The factory method for this sensor. */
def apply(rows: Int, cols: Int, initState: Int = 0) = {
/** The smallest value supplied by the next iterator returned by this sensor. */
var state = initState
    // Note that we subtract 1 from `state`, since we want to repeat the last iterator that supplied the current state.
/** The parameters that would restore this sensor to its current state. */
def parameters = (state - 1).toString
def reset() { state = initState}
def nextValue() = new Iterator[Byte] {
var nextValue = state
state += 1
var remainingValues = 3 * rows * cols
def next() = {
val retVal = nextValue
nextValue += 1
remainingValues -= 1
(retVal & 0xff).toByte
}
def hasNext = remainingValues > 0
}
new UnpipelinedColorSensor(rows, cols, nextValue _, reset _) {
override def restoreParameters = parameters
      // The default restoringClass would identify this as an anonymous subclass of UnpipelinedColorSensor.
      // We override it here to point to the SimpleUnpipelinedTestColorSensor factory object (so the restore method will be found).
override def restoringClass = SimpleUnpipelinedTestColorSensor
}
}
/** The factory method used to create an unpipelined color sensor from its stored parameter string. */
def restore(fieldType: FieldType, parameterString: String) = {
require(fieldType.dimensions == 2 && fieldType.tensorShape.points == 3,
"Expecting 2D ColorField Sensor, found " + fieldType)
val parameters = parameterString.split(" ")
require(parameters.length == 1, "Expecting 1 parameter, found " + parameters.length)
val initState = parameters(0).toInt
apply(fieldType.rows, fieldType.columns, initState)
}
}
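// Illustrative note (not part of the original file): for rows = cols = 1 and
// initState = 0, each iterator supplies 3*rows*cols = 3 bytes and `state`
// advances by one per iterator, so successive calls to nextValue() yield
// (0, 1, 2), then (1, 2, 3), then (2, 3, 4), ...; after reset() the
// sequence starts over at (0, 1, 2), matching the `state - 1` logic in
// `parameters`, which records the iterator that produced the current state.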
| hpe-cct/cct-core | src/test/scala/cogx/runtime/checkpoint/SimpleUnpipelinedTestColorSensor.scala | Scala | apache-2.0 | 2,722 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.util.concurrent.atomic.AtomicReference
import scala.collection.mutable
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.{TypeCheckResult, TypeCoercion, UnresolvedAttribute, UnresolvedException}
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.array.ByteArrayMethods
/**
* A placeholder of lambda variables to prevent unexpected resolution of [[LambdaFunction]].
*/
case class UnresolvedNamedLambdaVariable(nameParts: Seq[String])
extends LeafExpression with NamedExpression with Unevaluable {
override def name: String =
nameParts.map(n => if (n.contains(".")) s"`$n`" else n).mkString(".")
override def exprId: ExprId = throw new UnresolvedException(this, "exprId")
override def dataType: DataType = throw new UnresolvedException(this, "dataType")
override def nullable: Boolean = throw new UnresolvedException(this, "nullable")
override def qualifier: Seq[String] = throw new UnresolvedException(this, "qualifier")
override def toAttribute: Attribute = throw new UnresolvedException(this, "toAttribute")
override def newInstance(): NamedExpression = throw new UnresolvedException(this, "newInstance")
override lazy val resolved = false
override def toString: String = s"lambda '$name"
override def sql: String = name
}
/**
* A named lambda variable.
*/
case class NamedLambdaVariable(
name: String,
dataType: DataType,
nullable: Boolean,
exprId: ExprId = NamedExpression.newExprId,
value: AtomicReference[Any] = new AtomicReference())
extends LeafExpression
with NamedExpression
with CodegenFallback {
override def qualifier: Seq[String] = Seq.empty
override def newInstance(): NamedExpression =
copy(exprId = NamedExpression.newExprId, value = new AtomicReference())
override def toAttribute: Attribute = {
AttributeReference(name, dataType, nullable, Metadata.empty)(exprId, Seq.empty)
}
override def eval(input: InternalRow): Any = value.get
override def toString: String = s"lambda $name#${exprId.id}$typeSuffix"
override def simpleString: String = s"lambda $name#${exprId.id}: ${dataType.simpleString}"
}
/**
* A lambda function and its arguments. A lambda function can be hidden when a user wants to
 * process a completely independent expression in a [[HigherOrderFunction]]; the lambda function
* and its variables are then only used for internal bookkeeping within the higher order function.
*/
case class LambdaFunction(
function: Expression,
arguments: Seq[NamedExpression],
hidden: Boolean = false)
extends Expression with CodegenFallback {
override def children: Seq[Expression] = function +: arguments
override def dataType: DataType = function.dataType
override def nullable: Boolean = function.nullable
lazy val bound: Boolean = arguments.forall(_.resolved)
override def eval(input: InternalRow): Any = function.eval(input)
}
object LambdaFunction {
val identity: LambdaFunction = {
val id = UnresolvedNamedLambdaVariable(Seq("id"))
LambdaFunction(id, Seq(id))
}
}
/**
* A higher order function takes one or more (lambda) functions and applies these to some objects.
* The function produces a number of variables which can be consumed by some lambda function.
*/
trait HigherOrderFunction extends Expression with ExpectsInputTypes {
override def nullable: Boolean = arguments.exists(_.nullable)
override def children: Seq[Expression] = arguments ++ functions
/**
* Arguments of the higher ordered function.
*/
def arguments: Seq[Expression]
def argumentTypes: Seq[AbstractDataType]
/**
   * All arguments have been resolved. This means that the types and nullability of (most of) the
   * lambda function arguments are known, and that we can start binding the lambda functions.
*/
lazy val argumentsResolved: Boolean = arguments.forall(_.resolved)
/**
* Checks the argument data types, returns `TypeCheckResult.success` if it's valid,
* or returns a `TypeCheckResult` with an error message if invalid.
* Note: it's not valid to call this method until `argumentsResolved == true`.
*/
def checkArgumentDataTypes(): TypeCheckResult = {
ExpectsInputTypes.checkInputDataTypes(arguments, argumentTypes)
}
/**
* Functions applied by the higher order function.
*/
def functions: Seq[Expression]
def functionTypes: Seq[AbstractDataType]
override def inputTypes: Seq[AbstractDataType] = argumentTypes ++ functionTypes
/**
* All inputs must be resolved and all functions must be resolved lambda functions.
*/
override lazy val resolved: Boolean = argumentsResolved && functions.forall {
case l: LambdaFunction => l.resolved
case _ => false
}
/**
* Bind the lambda functions to the [[HigherOrderFunction]] using the given bind function. The
   * bind function takes the potential lambda and its (partial) arguments and converts this into
* a bound lambda function.
*/
def bind(f: (Expression, Seq[(DataType, Boolean)]) => LambdaFunction): HigherOrderFunction
  // Make sure the lambda variables refer to the same instances as the arguments, in case the
  // variables are instantiated separately during serialization or for some other reason.
@transient lazy val functionsForEval: Seq[Expression] = functions.map {
case LambdaFunction(function, arguments, hidden) =>
val argumentMap = arguments.map { arg => arg.exprId -> arg }.toMap
function.transformUp {
case variable: NamedLambdaVariable if argumentMap.contains(variable.exprId) =>
argumentMap(variable.exprId)
}
}
}
/**
* Trait for functions having as input one argument and one function.
*/
trait SimpleHigherOrderFunction extends HigherOrderFunction {
def argument: Expression
override def arguments: Seq[Expression] = argument :: Nil
def argumentType: AbstractDataType
override def argumentTypes(): Seq[AbstractDataType] = argumentType :: Nil
def function: Expression
override def functions: Seq[Expression] = function :: Nil
def functionType: AbstractDataType = AnyDataType
override def functionTypes: Seq[AbstractDataType] = functionType :: Nil
def functionForEval: Expression = functionsForEval.head
/**
* Called by [[eval]]. If a subclass keeps the default nullability, it can override this method
* in order to save null-check code.
*/
protected def nullSafeEval(inputRow: InternalRow, argumentValue: Any): Any =
    sys.error(s"SimpleHigherOrderFunction must override either eval or nullSafeEval")
override def eval(inputRow: InternalRow): Any = {
val value = argument.eval(inputRow)
if (value == null) {
null
} else {
nullSafeEval(inputRow, value)
}
}
}
trait ArrayBasedSimpleHigherOrderFunction extends SimpleHigherOrderFunction {
override def argumentType: AbstractDataType = ArrayType
}
trait MapBasedSimpleHigherOrderFunction extends SimpleHigherOrderFunction {
override def argumentType: AbstractDataType = MapType
}
/**
* Transform elements in an array using the transform function. This is similar to
* a `map` in functional programming.
*/
@ExpressionDescription(
usage = "_FUNC_(expr, func) - Transforms elements in an array using the function.",
examples = """
Examples:
> SELECT _FUNC_(array(1, 2, 3), x -> x + 1);
[2,3,4]
> SELECT _FUNC_(array(1, 2, 3), (x, i) -> x + i);
[1,3,5]
""",
since = "2.4.0")
case class ArrayTransform(
argument: Expression,
function: Expression)
extends ArrayBasedSimpleHigherOrderFunction with CodegenFallback {
override def dataType: ArrayType = ArrayType(function.dataType, function.nullable)
override def bind(f: (Expression, Seq[(DataType, Boolean)]) => LambdaFunction): ArrayTransform = {
val ArrayType(elementType, containsNull) = argument.dataType
function match {
case LambdaFunction(_, arguments, _) if arguments.size == 2 =>
copy(function = f(function, (elementType, containsNull) :: (IntegerType, false) :: Nil))
case _ =>
copy(function = f(function, (elementType, containsNull) :: Nil))
}
}
@transient lazy val (elementVar, indexVar) = {
val LambdaFunction(_, (elementVar: NamedLambdaVariable) +: tail, _) = function
val indexVar = if (tail.nonEmpty) {
Some(tail.head.asInstanceOf[NamedLambdaVariable])
} else {
None
}
(elementVar, indexVar)
}
override def nullSafeEval(inputRow: InternalRow, argumentValue: Any): Any = {
val arr = argumentValue.asInstanceOf[ArrayData]
val f = functionForEval
val result = new GenericArrayData(new Array[Any](arr.numElements))
var i = 0
while (i < arr.numElements) {
elementVar.value.set(arr.get(i, elementVar.dataType))
if (indexVar.isDefined) {
indexVar.get.value.set(i)
}
result.update(i, f.eval(inputRow))
i += 1
}
result
}
override def prettyName: String = "transform"
}
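// Usage sketch (an illustrative addition, not part of the original source): users
// normally reach this expression through SQL or the DataFrame API, where the analyzer
// binds the lambda before evaluation, e.g.
//
//   spark.sql("SELECT transform(array(1, 2, 3), (x, i) -> x + i)")  // => [1, 3, 5]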
/**
* Filters entries in a map using the provided function.
*/
@ExpressionDescription(
usage = "_FUNC_(expr, func) - Filters entries in a map using the function.",
examples = """
Examples:
> SELECT _FUNC_(map(1, 0, 2, 2, 3, -1), (k, v) -> k > v);
{1:0,3:-1}
""",
since = "3.0.0")
case class MapFilter(
argument: Expression,
function: Expression)
extends MapBasedSimpleHigherOrderFunction with CodegenFallback {
@transient lazy val (keyVar, valueVar) = {
val args = function.asInstanceOf[LambdaFunction].arguments
(args.head.asInstanceOf[NamedLambdaVariable], args.tail.head.asInstanceOf[NamedLambdaVariable])
}
@transient lazy val MapType(keyType, valueType, valueContainsNull) = argument.dataType
override def bind(f: (Expression, Seq[(DataType, Boolean)]) => LambdaFunction): MapFilter = {
copy(function = f(function, (keyType, false) :: (valueType, valueContainsNull) :: Nil))
}
override def nullSafeEval(inputRow: InternalRow, argumentValue: Any): Any = {
val m = argumentValue.asInstanceOf[MapData]
val f = functionForEval
val retKeys = new mutable.ListBuffer[Any]
val retValues = new mutable.ListBuffer[Any]
m.foreach(keyType, valueType, (k, v) => {
keyVar.value.set(k)
valueVar.value.set(v)
if (f.eval(inputRow).asInstanceOf[Boolean]) {
retKeys += k
retValues += v
}
})
ArrayBasedMapData(retKeys.toArray, retValues.toArray)
}
override def dataType: DataType = argument.dataType
override def functionType: AbstractDataType = BooleanType
override def prettyName: String = "map_filter"
}
/**
* Filters the input array using the given lambda function.
*/
@ExpressionDescription(
usage = "_FUNC_(expr, func) - Filters the input array using the given predicate.",
examples = """
Examples:
> SELECT _FUNC_(array(1, 2, 3), x -> x % 2 == 1);
[1,3]
""",
since = "2.4.0")
case class ArrayFilter(
argument: Expression,
function: Expression)
extends ArrayBasedSimpleHigherOrderFunction with CodegenFallback {
override def dataType: DataType = argument.dataType
override def functionType: AbstractDataType = BooleanType
override def bind(f: (Expression, Seq[(DataType, Boolean)]) => LambdaFunction): ArrayFilter = {
val ArrayType(elementType, containsNull) = argument.dataType
copy(function = f(function, (elementType, containsNull) :: Nil))
}
@transient lazy val LambdaFunction(_, Seq(elementVar: NamedLambdaVariable), _) = function
override def nullSafeEval(inputRow: InternalRow, argumentValue: Any): Any = {
val arr = argumentValue.asInstanceOf[ArrayData]
val f = functionForEval
val buffer = new mutable.ArrayBuffer[Any](arr.numElements)
var i = 0
while (i < arr.numElements) {
elementVar.value.set(arr.get(i, elementVar.dataType))
if (f.eval(inputRow).asInstanceOf[Boolean]) {
buffer += elementVar.value.get
}
i += 1
}
new GenericArrayData(buffer)
}
override def prettyName: String = "filter"
}
/**
* Tests whether a predicate holds for one or more elements in the array.
*/
@ExpressionDescription(usage =
"_FUNC_(expr, pred) - Tests whether a predicate holds for one or more elements in the array.",
examples = """
Examples:
> SELECT _FUNC_(array(1, 2, 3), x -> x % 2 == 0);
true
""",
since = "2.4.0")
case class ArrayExists(
argument: Expression,
function: Expression)
extends ArrayBasedSimpleHigherOrderFunction with CodegenFallback {
override def dataType: DataType = BooleanType
override def functionType: AbstractDataType = BooleanType
override def bind(f: (Expression, Seq[(DataType, Boolean)]) => LambdaFunction): ArrayExists = {
val ArrayType(elementType, containsNull) = argument.dataType
copy(function = f(function, (elementType, containsNull) :: Nil))
}
@transient lazy val LambdaFunction(_, Seq(elementVar: NamedLambdaVariable), _) = function
override def nullSafeEval(inputRow: InternalRow, argumentValue: Any): Any = {
val arr = argumentValue.asInstanceOf[ArrayData]
val f = functionForEval
var exists = false
var i = 0
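    // Short-circuit: stop scanning as soon as the predicate holds for an element.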
while (i < arr.numElements && !exists) {
elementVar.value.set(arr.get(i, elementVar.dataType))
if (f.eval(inputRow).asInstanceOf[Boolean]) {
exists = true
}
i += 1
}
exists
}
override def prettyName: String = "exists"
}
/**
* Applies a binary operator to a start value and all elements in the array.
*/
@ExpressionDescription(
usage =
"""
_FUNC_(expr, start, merge, finish) - Applies a binary operator to an initial state and all
elements in the array, and reduces this to a single state. The final state is converted
into the final result by applying a finish function.
""",
examples = """
Examples:
> SELECT _FUNC_(array(1, 2, 3), 0, (acc, x) -> acc + x);
6
> SELECT _FUNC_(array(1, 2, 3), 0, (acc, x) -> acc + x, acc -> acc * 10);
60
""",
since = "2.4.0")
case class ArrayAggregate(
argument: Expression,
zero: Expression,
merge: Expression,
finish: Expression)
extends HigherOrderFunction with CodegenFallback {
def this(argument: Expression, zero: Expression, merge: Expression) = {
this(argument, zero, merge, LambdaFunction.identity)
}
override def arguments: Seq[Expression] = argument :: zero :: Nil
override def argumentTypes: Seq[AbstractDataType] = ArrayType :: AnyDataType :: Nil
override def functions: Seq[Expression] = merge :: finish :: Nil
override def functionTypes: Seq[AbstractDataType] = zero.dataType :: AnyDataType :: Nil
override def nullable: Boolean = argument.nullable || finish.nullable
override def dataType: DataType = finish.dataType
override def checkInputDataTypes(): TypeCheckResult = {
checkArgumentDataTypes() match {
case TypeCheckResult.TypeCheckSuccess =>
if (!DataType.equalsStructurally(
zero.dataType, merge.dataType, ignoreNullability = true)) {
TypeCheckResult.TypeCheckFailure(
s"argument 3 requires ${zero.dataType.simpleString} type, " +
s"however, '${merge.sql}' is of ${merge.dataType.catalogString} type.")
} else {
TypeCheckResult.TypeCheckSuccess
}
case failure => failure
}
}
override def bind(f: (Expression, Seq[(DataType, Boolean)]) => LambdaFunction): ArrayAggregate = {
// Be very conservative with nullable. We cannot be sure that the accumulator does not
// evaluate to null. So we always set nullable to true here.
val ArrayType(elementType, containsNull) = argument.dataType
val acc = zero.dataType -> true
val newMerge = f(merge, acc :: (elementType, containsNull) :: Nil)
val newFinish = f(finish, acc :: Nil)
copy(merge = newMerge, finish = newFinish)
}
@transient lazy val LambdaFunction(_,
Seq(accForMergeVar: NamedLambdaVariable, elementVar: NamedLambdaVariable), _) = merge
@transient lazy val LambdaFunction(_, Seq(accForFinishVar: NamedLambdaVariable), _) = finish
override def eval(input: InternalRow): Any = {
val arr = argument.eval(input).asInstanceOf[ArrayData]
if (arr == null) {
null
} else {
val Seq(mergeForEval, finishForEval) = functionsForEval
accForMergeVar.value.set(zero.eval(input))
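      // Fold over the array: each iteration feeds the current accumulator and element
      // into the merge lambda; the finish lambda then runs exactly once on the result.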
var i = 0
while (i < arr.numElements()) {
elementVar.value.set(arr.get(i, elementVar.dataType))
accForMergeVar.value.set(mergeForEval.eval(input))
i += 1
}
accForFinishVar.value.set(accForMergeVar.value.get)
finishForEval.eval(input)
}
}
override def prettyName: String = "aggregate"
}
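// Worked trace (an illustrative addition): aggregate(array(1, 2, 3), 0,
// (acc, x) -> acc + x, acc -> acc * 10) folds 0 -> 1 -> 3 -> 6, and the finish
// lambda turns 6 into 60, matching the second SQL example above.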
/**
 * Transforms the keys of a map by applying the given function to each entry, and
 * returns a map with the transformed keys and the original values.
*/
@ExpressionDescription(
usage = "_FUNC_(expr, func) - Transforms elements in a map using the function.",
examples = """
Examples:
> SELECT _FUNC_(map_from_arrays(array(1, 2, 3), array(1, 2, 3)), (k, v) -> k + 1);
{2:1,3:2,4:3}
> SELECT _FUNC_(map_from_arrays(array(1, 2, 3), array(1, 2, 3)), (k, v) -> k + v);
{2:1,4:2,6:3}
""",
since = "3.0.0")
case class TransformKeys(
argument: Expression,
function: Expression)
extends MapBasedSimpleHigherOrderFunction with CodegenFallback {
@transient lazy val MapType(keyType, valueType, valueContainsNull) = argument.dataType
override def dataType: MapType = MapType(function.dataType, valueType, valueContainsNull)
override def checkInputDataTypes(): TypeCheckResult = {
TypeUtils.checkForMapKeyType(function.dataType)
}
override def bind(f: (Expression, Seq[(DataType, Boolean)]) => LambdaFunction): TransformKeys = {
copy(function = f(function, (keyType, false) :: (valueType, valueContainsNull) :: Nil))
}
@transient lazy val LambdaFunction(
_, (keyVar: NamedLambdaVariable) :: (valueVar: NamedLambdaVariable) :: Nil, _) = function
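  // The map builder below assembles the result and validates the transformed keys
  // (for example, rejecting null keys and detecting duplicates).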
private lazy val mapBuilder = new ArrayBasedMapBuilder(dataType.keyType, dataType.valueType)
override def nullSafeEval(inputRow: InternalRow, argumentValue: Any): Any = {
val map = argumentValue.asInstanceOf[MapData]
val resultKeys = new GenericArrayData(new Array[Any](map.numElements))
var i = 0
while (i < map.numElements) {
keyVar.value.set(map.keyArray().get(i, keyVar.dataType))
valueVar.value.set(map.valueArray().get(i, valueVar.dataType))
val result = functionForEval.eval(inputRow)
resultKeys.update(i, result)
i += 1
}
mapBuilder.from(resultKeys, map.valueArray())
}
override def prettyName: String = "transform_keys"
}
/**
 * Transforms the values of a map by applying the function to each entry, keeping the keys.
*/
@ExpressionDescription(
usage = "_FUNC_(expr, func) - Transforms values in the map using the function.",
examples = """
Examples:
> SELECT _FUNC_(map_from_arrays(array(1, 2, 3), array(1, 2, 3)), (k, v) -> v + 1);
{1:2,2:3,3:4}
> SELECT _FUNC_(map_from_arrays(array(1, 2, 3), array(1, 2, 3)), (k, v) -> k + v);
{1:2,2:4,3:6}
""",
since = "3.0.0")
case class TransformValues(
argument: Expression,
function: Expression)
extends MapBasedSimpleHigherOrderFunction with CodegenFallback {
@transient lazy val MapType(keyType, valueType, valueContainsNull) = argument.dataType
override def dataType: DataType = MapType(keyType, function.dataType, function.nullable)
override def bind(f: (Expression, Seq[(DataType, Boolean)]) => LambdaFunction)
: TransformValues = {
copy(function = f(function, (keyType, false) :: (valueType, valueContainsNull) :: Nil))
}
@transient lazy val LambdaFunction(
_, (keyVar: NamedLambdaVariable) :: (valueVar: NamedLambdaVariable) :: Nil, _) = function
override def nullSafeEval(inputRow: InternalRow, argumentValue: Any): Any = {
val map = argumentValue.asInstanceOf[MapData]
val resultValues = new GenericArrayData(new Array[Any](map.numElements))
var i = 0
while (i < map.numElements) {
keyVar.value.set(map.keyArray().get(i, keyVar.dataType))
valueVar.value.set(map.valueArray().get(i, valueVar.dataType))
resultValues.update(i, functionForEval.eval(inputRow))
i += 1
}
new ArrayBasedMapData(map.keyArray(), resultValues)
}
override def prettyName: String = "transform_values"
}
/**
* Merges two given maps into a single map by applying function to the pair of values with
* the same key.
*/
@ExpressionDescription(
usage =
"""
_FUNC_(map1, map2, function) - Merges two given maps into a single map by applying
      function to the pair of values with the same key. For keys present in only one map,
      NULL is passed as the value for the missing side. If an input map contains duplicated
      keys, only the first entry of each duplicated key is passed into the lambda function.
""",
examples = """
Examples:
> SELECT _FUNC_(map(1, 'a', 2, 'b'), map(1, 'x', 2, 'y'), (k, v1, v2) -> concat(v1, v2));
{1:"ax",2:"by"}
""",
since = "3.0.0")
case class MapZipWith(left: Expression, right: Expression, function: Expression)
extends HigherOrderFunction with CodegenFallback {
def functionForEval: Expression = functionsForEval.head
@transient lazy val MapType(leftKeyType, leftValueType, leftValueContainsNull) = left.dataType
@transient lazy val MapType(rightKeyType, rightValueType, rightValueContainsNull) = right.dataType
@transient lazy val keyType =
TypeCoercion.findCommonTypeDifferentOnlyInNullFlags(leftKeyType, rightKeyType).get
@transient lazy val ordering = TypeUtils.getInterpretedOrdering(keyType)
override def arguments: Seq[Expression] = left :: right :: Nil
override def argumentTypes: Seq[AbstractDataType] = MapType :: MapType :: Nil
override def functions: Seq[Expression] = function :: Nil
override def functionTypes: Seq[AbstractDataType] = AnyDataType :: Nil
override def dataType: DataType = MapType(keyType, function.dataType, function.nullable)
override def bind(f: (Expression, Seq[(DataType, Boolean)]) => LambdaFunction): MapZipWith = {
val arguments = Seq((keyType, false), (leftValueType, true), (rightValueType, true))
copy(function = f(function, arguments))
}
override def checkArgumentDataTypes(): TypeCheckResult = {
super.checkArgumentDataTypes() match {
case TypeCheckResult.TypeCheckSuccess =>
if (leftKeyType.sameType(rightKeyType)) {
TypeUtils.checkForOrderingExpr(leftKeyType, s"function $prettyName")
} else {
TypeCheckResult.TypeCheckFailure(s"The input to function $prettyName should have " +
s"been two ${MapType.simpleString}s with compatible key types, but the key types are " +
s"[${leftKeyType.catalogString}, ${rightKeyType.catalogString}].")
}
case failure => failure
}
}
override def checkInputDataTypes(): TypeCheckResult = checkArgumentDataTypes()
override def eval(input: InternalRow): Any = {
val value1 = left.eval(input)
if (value1 == null) {
null
} else {
val value2 = right.eval(input)
if (value2 == null) {
null
} else {
nullSafeEval(input, value1, value2)
}
}
}
@transient lazy val LambdaFunction(_, Seq(
keyVar: NamedLambdaVariable,
value1Var: NamedLambdaVariable,
value2Var: NamedLambdaVariable),
_) = function
/**
   * The function accepts two key arrays and returns a collection of keys with indexes
   * into the value arrays. Each index pair is represented as an array of two Option[Int]
   * values; using a mutable array here is a small optimization.
*/
@transient private lazy val getKeysWithValueIndexes:
(ArrayData, ArrayData) => mutable.Iterable[(Any, Array[Option[Int]])] = {
if (TypeUtils.typeWithProperEquals(keyType)) {
getKeysWithIndexesFast
} else {
getKeysWithIndexesBruteForce
}
}
private def assertSizeOfArrayBuffer(size: Int): Unit = {
if (size > ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH) {
      throw new RuntimeException(s"Cannot zip maps with $size unique keys: this " +
        s"exceeds the array size limit ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.")
}
}
private def getKeysWithIndexesFast(keys1: ArrayData, keys2: ArrayData) = {
val hashMap = new mutable.LinkedHashMap[Any, Array[Option[Int]]]
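    // LinkedHashMap preserves insertion order, so result keys appear in the order
    // they are first seen across the two input maps.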
    for ((z, array) <- Array((0, keys1), (1, keys2))) {
var i = 0
while (i < array.numElements()) {
val key = array.get(i, keyType)
hashMap.get(key) match {
case Some(indexes) =>
if (indexes(z).isEmpty) {
indexes(z) = Some(i)
}
case None =>
val indexes = Array[Option[Int]](None, None)
indexes(z) = Some(i)
hashMap.put(key, indexes)
}
i += 1
}
}
hashMap
}
private def getKeysWithIndexesBruteForce(keys1: ArrayData, keys2: ArrayData) = {
val arrayBuffer = new mutable.ArrayBuffer[(Any, Array[Option[Int]])]
    for ((z, array) <- Array((0, keys1), (1, keys2))) {
var i = 0
while (i < array.numElements()) {
val key = array.get(i, keyType)
var found = false
var j = 0
while (!found && j < arrayBuffer.size) {
val (bufferKey, indexes) = arrayBuffer(j)
if (ordering.equiv(bufferKey, key)) {
found = true
          if (indexes(z).isEmpty) {
indexes(z) = Some(i)
}
}
j += 1
}
if (!found) {
assertSizeOfArrayBuffer(arrayBuffer.size)
val indexes = Array[Option[Int]](None, None)
indexes(z) = Some(i)
arrayBuffer += Tuple2(key, indexes)
}
i += 1
}
}
arrayBuffer
}
private def nullSafeEval(inputRow: InternalRow, value1: Any, value2: Any): Any = {
val mapData1 = value1.asInstanceOf[MapData]
val mapData2 = value2.asInstanceOf[MapData]
val keysWithIndexes = getKeysWithValueIndexes(mapData1.keyArray(), mapData2.keyArray())
val size = keysWithIndexes.size
val keys = new GenericArrayData(new Array[Any](size))
val values = new GenericArrayData(new Array[Any](size))
val valueData1 = mapData1.valueArray()
val valueData2 = mapData2.valueArray()
var i = 0
for ((key, Array(index1, index2)) <- keysWithIndexes) {
val v1 = index1.map(valueData1.get(_, leftValueType)).getOrElse(null)
val v2 = index2.map(valueData2.get(_, rightValueType)).getOrElse(null)
keyVar.value.set(key)
value1Var.value.set(v1)
value2Var.value.set(v2)
keys.update(i, key)
values.update(i, functionForEval.eval(inputRow))
i += 1
}
new ArrayBasedMapData(keys, values)
}
override def prettyName: String = "map_zip_with"
}
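// Illustrative note (an addition, not from the original source): keys present in only
// one input see NULL for the missing side, so
// map_zip_with(map(1, 'a'), map(2, 'x'), (k, v1, v2) -> coalesce(v1, v2))
// yields {1:"a", 2:"x"}.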
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(left, right, func) - Merges the two given arrays, element-wise, into a single array using function. If one array is shorter, nulls are appended at the end to match the length of the longer array, before applying function.",
examples = """
Examples:
> SELECT _FUNC_(array(1, 2, 3), array('a', 'b', 'c'), (x, y) -> (y, x));
[{"y":"a","x":1},{"y":"b","x":2},{"y":"c","x":3}]
> SELECT _FUNC_(array(1, 2), array(3, 4), (x, y) -> x + y);
[4,6]
> SELECT _FUNC_(array('a', 'b', 'c'), array('d', 'e', 'f'), (x, y) -> concat(x, y));
["ad","be","cf"]
""",
since = "2.4.0")
// scalastyle:on line.size.limit
case class ZipWith(left: Expression, right: Expression, function: Expression)
extends HigherOrderFunction with CodegenFallback {
def functionForEval: Expression = functionsForEval.head
override def arguments: Seq[Expression] = left :: right :: Nil
override def argumentTypes: Seq[AbstractDataType] = ArrayType :: ArrayType :: Nil
  override def functions: Seq[Expression] = function :: Nil
override def functionTypes: Seq[AbstractDataType] = AnyDataType :: Nil
override def dataType: ArrayType = ArrayType(function.dataType, function.nullable)
override def bind(f: (Expression, Seq[(DataType, Boolean)]) => LambdaFunction): ZipWith = {
val ArrayType(leftElementType, _) = left.dataType
val ArrayType(rightElementType, _) = right.dataType
copy(function = f(function,
(leftElementType, true) :: (rightElementType, true) :: Nil))
}
@transient lazy val LambdaFunction(_,
Seq(leftElemVar: NamedLambdaVariable, rightElemVar: NamedLambdaVariable), _) = function
override def eval(input: InternalRow): Any = {
val leftArr = left.eval(input).asInstanceOf[ArrayData]
if (leftArr == null) {
null
} else {
val rightArr = right.eval(input).asInstanceOf[ArrayData]
if (rightArr == null) {
null
} else {
val resultLength = math.max(leftArr.numElements(), rightArr.numElements())
val f = functionForEval
val result = new GenericArrayData(new Array[Any](resultLength))
var i = 0
while (i < resultLength) {
if (i < leftArr.numElements()) {
leftElemVar.value.set(leftArr.get(i, leftElemVar.dataType))
} else {
leftElemVar.value.set(null)
}
if (i < rightArr.numElements()) {
rightElemVar.value.set(rightArr.get(i, rightElemVar.dataType))
} else {
rightElemVar.value.set(null)
}
result.update(i, f.eval(input))
i += 1
}
result
}
}
}
override def prettyName: String = "zip_with"
}
| mdespriee/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/higherOrderFunctions.scala | Scala | apache-2.0 | 30,683 |
package com.catinthedark.yoba
import com.catinthedark.yoba.entity.Creatures
import com.catinthedark.yoba.entity.Creatures.Creature
import scala.collection.mutable
/**
* Created by over on 18.04.15.
*/
class Shared(var lvlTime: Float,
var lvlDistance: Float,
var lvl: Int,
var speed: Float,
var cursorPosition: Float,
val creatures: mutable.ListBuffer[Creature],
var lastSpawnDistance: Float = 0f,
var palkaPos: Float = -1.0f,
var shouldStartTimer: Boolean = false,
var playerX: Float = 0f,
var isFalling: Boolean = false,
var playerZ: Float = 0.05f,
var trash: mutable.ListBuffer[Creature],
var username: String = ""
) {
def reset() = {
lvlTime = 0f
lvlDistance = 0f
lvl = 1
speed = 0f
cursorPosition = 0f
creatures.clear()
lastSpawnDistance = 0f
palkaPos = -1.0f
shouldStartTimer = false
isFalling = false
playerX = 0
playerZ = 0.05f
trash.clear()
username = ""
}
/**
* Called in onActivate in DayState
* @see DayState
*/
def prepareGame() = {
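    // Same field resets as reset(), except the current lvl is preserved; afterwards
    // the static scenery is placed via preset().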
lvlTime = 0f
lvlDistance = 0f
speed = 0f
cursorPosition = 0f
creatures.clear()
lastSpawnDistance = 0f
palkaPos = -1.0f
shouldStartTimer = false
isFalling = false
playerX = 0
playerZ = 0.05f
trash.clear()
username = ""
preset()
}
def preset(): Unit = {
trash += Creatures.createSign(this, 1.4f, 0.3f)
trash += Creatures.createSign(this, -1.5f, 1.5f)
trash += Creatures.createSign(this, -1.5f, 0.03f)
trash += Creatures.createLamp(this, -1.25f, 1f)
trash += Creatures.createLamp(this, 1f, 0.01f)
trash += Creatures.createLamp(this, 1f, 0.7f)
trash += Creatures.createLamp(this, 1f, 0.4f)
trash += Creatures.createTree(this, 1.2f, 0.1f)
trash += Creatures.createTree(this, 1.2f, 0.4f)
trash += Creatures.createTree(this, 1.2f, 0.8f)
trash += Creatures.createTree(this, 1.2f, 1.2f)
}
} | cat-in-the-dark/old48_33_game | src/main/scala/com/catinthedark/yoba/Shared.scala | Scala | mit | 2,097 |
package almond.protocol
import com.github.plokhotnyuk.jsoniter_scala.core.JsonValueCodec
import com.github.plokhotnyuk.jsoniter_scala.macros.JsonCodecMaker
// See http://jupyter-client.readthedocs.io/en/5.2.3/kernels.html#kernel-specs
final case class KernelSpec(
argv: List[String],
display_name: String,
language: String,
interrupt_mode: Option[String] = None,
env: Map[String, String] = Map.empty,
metadata: Option[RawJson] = None
)
object KernelSpec {
implicit val codec: JsonValueCodec[KernelSpec] =
JsonCodecMaker.make
}
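// Usage sketch (an illustrative addition, not part of the original file): writing a
// spec as the JSON expected in a kernel.json file. `writeToString` comes from
// com.github.plokhotnyuk.jsoniter_scala.core; the argv shown is a made-up launcher
// command.
//
//   import com.github.plokhotnyuk.jsoniter_scala.core.writeToString
//
//   val spec = KernelSpec(
//     argv = List("java", "-jar", "launcher.jar", "--connection-file", "{connection_file}"),
//     display_name = "Scala",
//     language = "scala"
//   )
//   val json = writeToString(spec) // picks up KernelSpec.codec implicitly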
| alexarchambault/jupyter-scala | modules/shared/protocol/src/main/scala/almond/protocol/KernelSpec.scala | Scala | apache-2.0 | 549 |
package de.unihamburg.vsis.sddf.indexing.blocking
import org.apache.spark.mllib.rdd.RDDFunctions.fromRDD
import org.apache.spark.rdd.RDD
import org.apache.spark.rdd.RDD.rddToOrderedRDDFunctions
import de.unihamburg.vsis.sddf.Parameterized
import de.unihamburg.vsis.sddf.indexing.blocking.keygeneration.BlockingKeyBuilder
import de.unihamburg.vsis.sddf.pipe.context.AbstractPipeContext
import de.unihamburg.vsis.sddf.reading.Tuple
import de.unihamburg.vsis.sddf.visualisation.model.AlgoAnalysable
class PipeBlockerSortedNeighborhood(windowSize: Int = 10)(implicit bkvBuilder: BlockingKeyBuilder)
extends BlockingPipe
with Parameterized {
def step(tuples: RDD[Tuple])(implicit pipeContext: AbstractPipeContext): RDD[Seq[Tuple]] = {
val bkvTuplePairs: RDD[(String, Tuple)] = tuples.map(t => (bkvBuilder.buildBlockingKey(t), t))
val sortedPairs = bkvTuplePairs.sortByKey().map(_._2)
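    // Sorted-neighborhood blocking: slide a window of `windowSize` tuples over the
    // key-sorted sequence; e.g. keys A, B, C, D with windowSize = 3 yield the
    // candidate blocks (A, B, C) and (B, C, D).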
sortedPairs.sliding(windowSize).map(_.toSeq)
}
@transient override val _analysable = new AlgoAnalysable
_analysable.algo = this
_analysable.name = this.name
override val name = "SortedNeighborhoodBlocker"
override val paramMap = Map("windowSize" -> windowSize,
"BlockingKeyBuilder" -> bkvBuilder)
}
object PipeBlockerSortedNeighborhood {
def apply(windowSize: Int = 10)(implicit bkvBuilder: BlockingKeyBuilder) = {
new PipeBlockerSortedNeighborhood(windowSize)
}
}
| numbnut/sddf | src/main/scala/de/unihamburg/vsis/sddf/indexing/blocking/PipeBlockerSortedNeighborhood.scala | Scala | gpl-3.0 | 1,399 |
package mesosphere.marathon.core.appinfo
import mesosphere.marathon.core.appinfo.impl.TaskForStatistics
import mesosphere.marathon.core.task.Task
import mesosphere.marathon.core.health.Health
import mesosphere.marathon.state.AppDefinition.VersionInfo
import mesosphere.marathon.state.AppDefinition.VersionInfo.FullVersionInfo
import mesosphere.marathon.state.Timestamp
case class TaskStatsByVersion(
maybeStartedAfterLastScaling: Option[TaskStats],
maybeWithLatestConfig: Option[TaskStats],
maybeWithOutdatedConfig: Option[TaskStats],
maybeTotalSummary: Option[TaskStats])
object TaskStatsByVersion {
def apply(
versionInfo: VersionInfo,
tasks: Iterable[TaskForStatistics]): TaskStatsByVersion =
{
def statsForVersion(versionTest: Timestamp => Boolean): Option[TaskStats] = {
TaskStats.forSomeTasks(tasks.filter(task => versionTest(task.version)))
}
val maybeFullVersionInfo = versionInfo match {
case full: FullVersionInfo => Some(full)
case _ => None
}
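    // Without a FullVersionInfo there are no scaling/config-change timestamps to
    // compare against, so only the total summary below can be computed.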
TaskStatsByVersion(
maybeTotalSummary = TaskStats.forSomeTasks(tasks),
maybeStartedAfterLastScaling = maybeFullVersionInfo.flatMap { vi =>
statsForVersion(_ >= vi.lastScalingAt)
},
maybeWithLatestConfig = maybeFullVersionInfo.flatMap { vi =>
statsForVersion(_ >= vi.lastConfigChangeAt)
},
maybeWithOutdatedConfig = maybeFullVersionInfo.flatMap { vi =>
statsForVersion(_ < vi.lastConfigChangeAt)
}
)
}
def apply(
now: Timestamp,
versionInfo: VersionInfo,
tasks: Iterable[Task],
statuses: Map[Task.Id, Seq[Health]]): TaskStatsByVersion =
{
TaskStatsByVersion(versionInfo, TaskForStatistics.forTasks(now, tasks, statuses))
}
}
case class TaskStats(
counts: TaskCounts,
maybeLifeTime: Option[TaskLifeTime])
object TaskStats {
def forSomeTasks(
now: Timestamp, tasks: Iterable[Task], statuses: Map[Task.Id, Seq[Health]]): Option[TaskStats] =
{
forSomeTasks(TaskForStatistics.forTasks(now, tasks, statuses))
}
def forSomeTasks(tasks: Iterable[TaskForStatistics]): Option[TaskStats] = {
if (tasks.isEmpty) {
None
} else {
Some(
TaskStats(
counts = TaskCounts(tasks),
maybeLifeTime = TaskLifeTime.forSomeTasks(tasks)
)
)
}
}
}
| timcharper/marathon | src/main/scala/mesosphere/marathon/core/appinfo/TaskStatsByVersion.scala | Scala | apache-2.0 | 2,383 |
package com.twitter.finagle.mux
import com.twitter.finagle.tracing.{SpanId, TraceId, Flags}
import com.twitter.finagle.{Dtab, Dentry, NameTree, Path}
import com.twitter.io.Charsets
import com.twitter.util.{Duration, Time}
import org.jboss.netty.buffer.{ChannelBuffer, ChannelBuffers}
/**
* Indicates that encoding or decoding of a Mux message failed.
 * The reason for the failure is given by the `why` string.
*/
case class BadMessageException(why: String) extends Exception(why)
// TODO: when the new com.twitter.codec.Codec arrives, define Message
// parsing as a bijection between ChannelBuffers and Message.
/**
* Documentation details are in the [[com.twitter.finagle.mux]] package object.
*/
private[finagle] sealed trait Message {
/**
* Values should correspond to the constants defined in
* [[com.twitter.finagle.mux.Message.Types]]
*/
def typ: Byte
  /** Only the low 3 bytes (24 bits) are used. */
def tag: Int
def buf: ChannelBuffer
}
private[finagle] object Message {
object Types {
// Application messages:
val Treq = 1: Byte
val Rreq = -1: Byte
val Tdispatch = 2: Byte
val Rdispatch = -2: Byte
// Control messages:
val Tdrain = 64: Byte
val Rdrain = -64: Byte
val Tping = 65: Byte
val Rping = -65: Byte
val Tdiscarded = 66: Byte
val Tlease = 67: Byte
val Rerr = -128: Byte
// Old implementation flukes.
val BAD_Tdiscarded = -62: Byte
val BAD_Rerr = 127: Byte
}
val MarkerTag = 0
val MinTag = 1
val MaxTag = (1<<23)-1
val TagMSB = (1<<23)
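  // A tag occupies the low 24 bits of the 4-byte frame header; MarkerTag (0) is
  // reserved for messages that are not part of a tagged request/response exchange.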
private def mkByte(b: Byte) =
ChannelBuffers.unmodifiableBuffer(ChannelBuffers.wrappedBuffer(Array(b)))
private val bufOfChar = Array[ChannelBuffer](
mkByte(0), mkByte(1), mkByte(2))
abstract class EmptyMessage extends Message {
def buf = ChannelBuffers.EMPTY_BUFFER
}
abstract class MarkerMessage extends Message {
def tag = 0
}
object Treq {
object Keys {
val TraceId = 1
val TraceFlag = 2
}
}
/** A transmit request message */
case class Treq(tag: Int, traceId: Option[TraceId], req: ChannelBuffer) extends Message {
import Treq._
def typ = Types.Treq
lazy val buf = {
val header = traceId match {
        // Currently we require the full 3-tuple, though this is
        // not strictly necessary.
case Some(traceId) =>
val hd = ChannelBuffers.buffer(1+1+1+24+1+1+1)
hd.writeByte(2) // 2 entries
hd.writeByte(Keys.TraceId) // key 0 (traceid)
hd.writeByte(24) // key 0 size
hd.writeLong(traceId.spanId.toLong)
hd.writeLong(traceId.parentId.toLong)
hd.writeLong(traceId.traceId.toLong)
hd.writeByte(Keys.TraceFlag) // key 1 (traceflag)
hd.writeByte(1) // key 1 size
hd.writeByte(traceId.flags.toLong.toByte)
hd
case None =>
bufOfChar(0) // 0 keys
}
ChannelBuffers.wrappedBuffer(header, req)
}
}
/** A reply to a `Treq` message */
abstract class Rreq(rreqType: Byte, body: ChannelBuffer) extends Message {
def typ = Types.Rreq
lazy val buf = ChannelBuffers.wrappedBuffer(bufOfChar(rreqType), body)
}
case class RreqOk(tag: Int, reply: ChannelBuffer) extends Rreq(0, reply)
case class RreqError(tag: Int, error: String) extends Rreq(1, encodeString(error))
case class RreqNack(tag: Int) extends Rreq(2, ChannelBuffers.EMPTY_BUFFER)
case class Tdispatch(
tag: Int,
contexts: Seq[(ChannelBuffer, ChannelBuffer)],
dst: String,
dtab: Dtab,
req: ChannelBuffer
) extends Message {
def typ = Types.Tdispatch
lazy val buf = {
var n = 2
var seq = contexts
while (seq.nonEmpty) {
val (k, v) = seq.head
n += 2+k.readableBytes + 2+v.readableBytes
seq = seq.tail
}
n += 2+dst.size
n += 2
var delegations = dtab.iterator
while (delegations.hasNext) {
val Dentry(src, dst) = delegations.next()
n += src.show.size+2 + dst.show.size+2
}
val hd = ChannelBuffers.dynamicBuffer(n)
hd.writeShort(contexts.length)
seq = contexts
while (seq.nonEmpty) {
// TODO: it may or may not make sense
// to do zero-copy here.
val (k, v) = seq.head
hd.writeShort(k.readableBytes)
hd.writeBytes(k.slice())
hd.writeShort(v.readableBytes)
hd.writeBytes(v.slice())
seq = seq.tail
}
val dstbytes = dst.getBytes(Charsets.Utf8)
hd.writeShort(dstbytes.size)
hd.writeBytes(dstbytes)
hd.writeShort(dtab.size)
delegations = dtab.iterator
while (delegations.hasNext) {
val Dentry(src, dst) = delegations.next()
val srcbytes = src.show.getBytes(Charsets.Utf8)
hd.writeShort(srcbytes.size)
hd.writeBytes(srcbytes)
val dstbytes = dst.show.getBytes(Charsets.Utf8)
hd.writeShort(dstbytes.size)
hd.writeBytes(dstbytes)
}
ChannelBuffers.wrappedBuffer(hd, req)
}
}
/** A reply to a `Tdispatch` message */
abstract class Rdispatch(
status: Byte,
contexts: Seq[(ChannelBuffer, ChannelBuffer)],
body: ChannelBuffer
) extends Message {
def typ = Types.Rdispatch
lazy val buf = {
var n = 1+2
var seq = contexts
while (seq.nonEmpty) {
val (k, v) = seq.head
n += 2+k.readableBytes+2+v.readableBytes
seq = seq.tail
}
val hd = ChannelBuffers.buffer(n)
hd.writeByte(status)
hd.writeShort(contexts.length)
seq = contexts
while (seq.nonEmpty) {
val (k, v) = seq.head
hd.writeShort(k.readableBytes)
hd.writeBytes(k.slice())
hd.writeShort(v.readableBytes)
hd.writeBytes(v.slice())
seq = seq.tail
}
ChannelBuffers.wrappedBuffer(hd, body)
}
}
case class RdispatchOk(
tag: Int,
contexts: Seq[(ChannelBuffer, ChannelBuffer)],
reply: ChannelBuffer
) extends Rdispatch(0, contexts, reply)
case class RdispatchError(
tag: Int,
contexts: Seq[(ChannelBuffer, ChannelBuffer)],
error: String
) extends Rdispatch(1, contexts, encodeString(error))
case class RdispatchNack(
tag: Int,
contexts: Seq[(ChannelBuffer, ChannelBuffer)]
) extends Rdispatch(2, contexts, ChannelBuffers.EMPTY_BUFFER)
/** Indicates to the client to stop sending new requests. */
case class Tdrain(tag: Int) extends EmptyMessage { def typ = Types.Tdrain }
/** Response from the client to a `Tdrain` message */
case class Rdrain(tag: Int) extends EmptyMessage { def typ = Types.Rdrain }
/** Used to check liveness */
case class Tping(tag: Int) extends EmptyMessage { def typ = Types.Tping }
/** Response to a `Tping` message */
case class Rping(tag: Int) extends EmptyMessage { def typ = Types.Rping }
/** Indicates that the corresponding T message produced an error. */
case class Rerr(tag: Int, error: String) extends Message {
// Use the old Rerr type in a transition period so that we
// can be reasonably sure we remain backwards compatible with
// old servers.
def typ = Types.BAD_Rerr
lazy val buf = encodeString(error)
}
/**
* Indicates that the `Treq` with the tag indicated by `which` has been discarded
* by the client.
*/
case class Tdiscarded(which: Int, why: String)
// Use the old Tdiscarded type in a transition period so that we
// can be reasonably sure we remain backwards compatible with
// old servers.
extends MarkerMessage {
def typ = Types.BAD_Tdiscarded
lazy val buf = ChannelBuffers.wrappedBuffer(
ChannelBuffers.wrappedBuffer(
Array[Byte]((which>>16 & 0xff).toByte, (which>>8 & 0xff).toByte, (which & 0xff).toByte)),
encodeString(why))
}
object Tlease {
val MinLease = Duration.Zero
val MaxLease = Duration.fromMilliseconds((1L << 32) - 1) // Unsigned Int max value
val MillisDuration: Byte = 0
def apply(howLong: Duration): Tlease = {
require(howLong >= MinLease && howLong <= MaxLease, "lease out of range")
Tlease(0, howLong.inMilliseconds)
}
def apply(end: Time): Tlease = Tlease(1, end.sinceEpoch.inMilliseconds)
}
case class Tlease(unit: Byte, howLong: Long) extends MarkerMessage {
def typ = Types.Tlease
lazy val buf = {
val b = ChannelBuffers.buffer(9)
b.writeByte(unit)
b.writeLong(howLong)
b
}
}
object Tmessage {
def unapply(m: Message): Option[Int] =
if (m.typ > 0) Some(m.tag)
else None
}
object Rmessage {
def unapply(m: Message): Option[Int] =
if (m.typ < 0) Some(m.tag)
else None
}
object ControlMessage {
// TODO: Update this extractor in the event that we "fix" the control
// message flukes by removing backwards compatibility.
def unapply(m: Message): Option[Int] =
if (math.abs(m.typ) >= 64 || m.typ == Types.BAD_Tdiscarded)
Some(m.tag)
else None
}
def decodeUtf8(buf: ChannelBuffer): String =
decodeUtf8(buf, buf.readableBytes)
def decodeUtf8(buf: ChannelBuffer, n: Int): String = {
val arr = new Array[Byte](n)
buf.readBytes(arr)
new String(arr, Charsets.Utf8)
}
def encodeString(str: String) =
ChannelBuffers.wrappedBuffer(str.getBytes(Charsets.Utf8))
private def decodeTreq(tag: Int, buf: ChannelBuffer) = {
if (buf.readableBytes < 1)
throw BadMessageException("short Treq")
var nkeys = buf.readByte().toInt
if (nkeys < 0)
throw BadMessageException("Treq: too many keys")
var trace3: Option[(SpanId, SpanId, SpanId)] = None
var traceFlags = 0L
while (nkeys > 0) {
if (buf.readableBytes < 2)
throw BadMessageException("short Treq (header)")
val key = buf.readByte()
val vsize = buf.readByte().toInt match {
case s if s < 0 => s + 256
case s => s
}
if (buf.readableBytes < vsize)
throw BadMessageException("short Treq (vsize)")
// TODO: technically we should probably check for duplicate
// keys, but for now, just pick the latest one.
key match {
case Treq.Keys.TraceId =>
if (vsize != 24)
throw BadMessageException("bad traceid size %d".format(vsize))
          trace3 = Some((
            SpanId(buf.readLong()), // spanId
            SpanId(buf.readLong()), // parentId
            SpanId(buf.readLong()) // traceId
          ))
case Treq.Keys.TraceFlag =>
// We only know about bit=0, so discard
// everything but the last byte
if (vsize > 1)
buf.readBytes(vsize-1)
if (vsize > 0)
traceFlags = buf.readByte().toLong
case _ =>
// discard:
buf.readBytes(vsize)
}
nkeys -= 1
}
val id = trace3 map { case (spanId, parentId, traceId) =>
TraceId(Some(traceId), Some(parentId), spanId, None, Flags(traceFlags))
}
Treq(tag, id, buf.slice())
}
private def decodeContexts(buf: ChannelBuffer): Seq[(ChannelBuffer, ChannelBuffer)] = {
val n = buf.readUnsignedShort()
if (n == 0)
return Seq.empty
val contexts = new Array[(ChannelBuffer, ChannelBuffer)](n)
var i = 0
while (i < n) {
val nk = buf.readUnsignedShort()
val k = buf.readSlice(nk)
val nv = buf.readUnsignedShort()
val v = buf.readSlice(nv)
contexts(i) = (k, v)
i += 1
}
contexts
}
private def decodeTdispatch(tag: Int, buf: ChannelBuffer) = {
val contexts = decodeContexts(buf)
val dstbuf = buf.readSlice(buf.readUnsignedShort())
val dst = decodeUtf8(dstbuf)
val nd = buf.readUnsignedShort()
val dtab = if (nd == 0) Dtab.empty else {
var i = 0
val delegations = new Array[Dentry](nd)
while (i < nd) {
val src = decodeUtf8(buf, buf.readUnsignedShort())
val dst = decodeUtf8(buf, buf.readUnsignedShort())
try {
delegations(i) = Dentry(Path.read(src), NameTree.read(dst))
} catch {
case _: IllegalArgumentException =>
delegations(i) = Dentry.nop
}
i += 1
}
Dtab(delegations)
}
Tdispatch(tag, contexts, dst, dtab, buf.slice())
}
private def decodeRdispatch(tag: Int, buf: ChannelBuffer) = {
val status = buf.readByte()
val contexts = decodeContexts(buf)
status match {
case 0 => RdispatchOk(tag, contexts, buf.slice())
case 1 => RdispatchError(tag, contexts, decodeUtf8(buf))
case 2 => RdispatchNack(tag, contexts)
case _ => throw BadMessageException("invalid Rdispatch status")
}
}
private def decodeRreq(tag: Int, buf: ChannelBuffer) = {
if (buf.readableBytes < 1)
throw BadMessageException("short Rreq")
buf.readByte() match {
case 0 => RreqOk(tag, buf.slice())
case 1 => RreqError(tag, decodeUtf8(buf))
case 2 => RreqNack(tag)
case _ => throw BadMessageException("invalid Rreq status")
}
}
private def decodeTdiscarded(buf: ChannelBuffer) = {
if (buf.readableBytes < 3)
throw BadMessageException("short Tdiscarded message")
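    // Reassemble the discarded request's 3-byte, big-endian tag.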
val which = ((buf.readByte() & 0xff)<<16) | ((buf.readByte() & 0xff)<<8) | (buf.readByte() & 0xff)
Tdiscarded(which, decodeUtf8(buf))
}
private def decodeTlease(buf: ChannelBuffer) = {
if (buf.readableBytes < 9)
throw BadMessageException("short Tlease message")
val unit: Byte = buf.readByte()
val howMuch: Long = buf.readLong()
Tlease(unit, howMuch)
}
def decode(buf: ChannelBuffer): Message = {
if (buf.readableBytes < 4)
throw BadMessageException("short message")
val head = buf.readInt()
def typ = (head>>24 & 0xff).toByte
val tag = head & 0x00ffffff
typ match {
case Types.Treq => decodeTreq(tag, buf)
case Types.Rreq => decodeRreq(tag, buf)
case Types.Tdispatch => decodeTdispatch(tag, buf)
case Types.Rdispatch => decodeRdispatch(tag, buf)
case Types.Tdrain => Tdrain(tag)
case Types.Rdrain => Rdrain(tag)
case Types.Tping => Tping(tag)
case Types.Rping => Rping(tag)
case Types.Rerr | Types.BAD_Rerr => Rerr(tag, decodeUtf8(buf))
case Types.Tdiscarded | Types.BAD_Tdiscarded => decodeTdiscarded(buf)
case Types.Tlease => decodeTlease(buf)
case bad => throw BadMessageException("bad message type: %d [tag=%d]".format(bad, tag))
}
}
def encode(m: Message): ChannelBuffer = {
if (m.tag < MarkerTag || (m.tag & ~TagMSB) > MaxTag)
throw new BadMessageException("invalid tag number %d".format(m.tag))
val head = Array[Byte](
m.typ,
(m.tag>>16 & 0xff).toByte,
(m.tag>>8 & 0xff).toByte,
(m.tag & 0xff).toByte
)
ChannelBuffers.wrappedBuffer(
ChannelBuffers.wrappedBuffer(head), m.buf)
}
}
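// Round-trip sketch (an illustrative addition, not part of the original file): encode
// and decode are inverses for well-formed messages, e.g.
//
//   val ping = Message.Tping(tag = 1)
//   assert(Message.decode(Message.encode(ping)) == ping)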
| yancl/finagle-6.22.0 | finagle-mux/src/main/scala/com/twitter/finagle/mux/Proto.scala | Scala | apache-2.0 | 14,937 |
package models.services
import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.api.services.IdentityService
import models.User
import scala.concurrent.Future
/**
* Handles actions to users.
*/
trait UserService extends IdentityService[User] {
/**
* Saves a user.
*
* @param user The user to save.
* @return The saved user.
*/
def save(user: User, userType: String): Future[User]
/**
* Checks if a user exists.
*
* @param loginInfo The login info of the user to find.
* @return The found user or None if no user for the given login info could be found.
*/
  def exists(loginInfo: LoginInfo): Boolean
}
| BBK-SDP-2015-jtomli03/Morphidose2 | app/models/services/UserService.scala | Scala | apache-2.0 | 671 |