Dataset columns:
  code: string (length 5-1M)
  repo_name: string (length 5-109)
  path: string (length 6-208)
  language: string (1 class)
  license: string (15 classes)
  size: int64 (5-1M)
package provingground.library

import provingground._
import HoTT._, scalahott._
import NatRing.{NatTyp => Nt, _}
import spire.implicits._

object SimpleEvensSym {
  val n = Nt.sym

  val isEven = "isEven" :: Nt ->: Type

  val zeroEven = "0even" :: isEven(Literal(0))

  val plusTwoEven = "_+2even" :: (n ~>: (isEven(n) ->: isEven(succ(succ(n)))))

  val double = n :-> (n + n)
}

object DoubleEvenSym {
  import SimpleEvensSym._

  val thm = n ~>: isEven(double(n))

  val hyp = "isEven(double(n))" :: isEven(double(n))

  val inductor = NatRing.induc(n :-> isEven(double(n)))

  val pf = inductor(zeroEven) {
    n :~> (hyp :-> (plusTwoEven(double(n))(hyp)))
  } !: thm
}

object SuccNOrNEvenSym {
  import SimpleEvensSym._

  val claim = n :-> (isEven(n) || isEven(succ(n)))

  val base = claim(zero).incl1(zeroEven) !: claim(zero)

  val hyp1 = "n-is-Even" :: isEven(n)
  val hyp2 = "(n+1)-is-Even" :: isEven(succ(n))

  val thm = n ~>: (claim(n))

  val step = n :~> {
    (claim(n).rec(claim(succ(n)))) {
      hyp1 :-> (claim(succ(n)).incl2(plusTwoEven(n)(hyp1)))
    } {
      hyp2 :-> (claim(succ(n)).incl1((hyp2)))
    }
  }

  val inductor = NatRing.induc(claim)

  val pf = inductor(base)(step) !: thm
}

object LocalConstImpliesConstSym {
  import SimpleEvensSym._

  val A = "A" :: Type

  val f = "f" :: Nt ->: A

  val ass = "assumption" :: n ~>: (f(n) =:= f(succ(n)))

  val claim = n :-> (f(zero) =:= f(n))

  val base = f(zero).refl

  val hyp = "hypothesis" :: (f(zero) =:= f(n))

  val step = hyp :-> {
    IdentityTyp.trans(A)(f(zero))(f(n))(f(succ(n)))(hyp)(ass(n))
  }

  val thm = n ~>: (claim(n))

  val inductor = NatRing.induc(claim)

  val pf = inductor(base)(n :~> step) !: thm
}

object EqualityNats {
  import NatRing._

  val n = NatTyp.sym
  val m = NatTyp.sym
  val k = NatTyp.sym

  val A = Type.sym

  val recNU = NatRing.rec(Type)

  val recNNU = NatRing.rec(NatTyp ->: Type)

  val eqm = "Eq(m)" :: NatTyp ->: Type

  val Eq = recNNU(recNU(One)(n :-> (A :-> (Zero: Typ[Term]))))(
    m :-> (eqm :-> (
      recNU(Zero)(n :-> (A :-> (eqm(n))))
    ))
  )

  val eqkk = "Eq(k)(k)" :: Eq(k)(k)

  val diag = NatRing.induc(k :-> Eq(k)(k))(Star)(k :~> (eqkk :-> eqkk))

  val equalmn = "Eq(m)(n)" :: (m =:= n)

  val eqFamily = IdentityTyp.induc(NatTyp, m :~> (n :~> (equalmn :~> (Eq(m)(n)))))(diag)
}

import EqualityNats._

object ZeroNotSucc {
  val thm = n ~>: ((zero =:= succ(n)) ->: Zero)

  val pf = n :~> (eqFamily(zero)(succ(n))) !: thm

  val formalPf = "lemma" :: thm
}

object SuccNotZero {
  val thm = n ~>: ((succ(n) =:= zero) ->: Zero)

  val pf = n :~> (eqFamily(succ(n))(zero)) !: thm

  val formalPf = "lemma" :: thm
}

object ExercisesNat {
  val sumZero = n ~>: m ~>: ((sum(n)(m) =:= zero) ->: ((n =:= 0) && (m =:= zero)))
  val sumZero1 = n ~>: m ~>: ((sum(n)(m) =:= zero) ->: (n =:= 0))
  val sumZero2 = n ~>: m ~>: ((sum(n)(m) =:= zero) ->: (m =:= 0))
  val zeroLeq = n ~>: (leq(zero)(n))
  val leqZero = n ~>: (leq(n)(zero) ->: (n =:= zero))
  val leqTrans = n ~>: (m ~>: (k ~>: (leq(n)(m) ->: (leq(m)(k) ->: leq(n)(k)))))
  val divTrans = n ~>: (m ~>: (k ~>: (divides(n)(m) ->: (divides(m)(k) ->: divides(n)(k)))))
  val leqAntiSym = n ~>: (m ~>: (leq(n)(m) ->: (leq(m)(n) ->: (m =:= n))))
  val divAntiSym = n ~>: (m ~>: (divides(n)(m) ->: (divides(m)(n) ->: (m =:= n))))
}
siddhartha-gadgil/ProvingGround
core/src/main/scala/provingground/library/ExampleTheoremsSymbolic.scala
Scala
mit
3,369
object B {
  import ATest.*
  def foo: A = new A(1)
}
dotty-staging/dotty
tests/pos/i1137-2/B_2.scala
Scala
apache-2.0
54
package de.kasoki.swtrealtime import scala.collection.immutable.SortedSet object BusStop extends Enumeration { type BusStop = BusStopType case class BusStopType(val code:String, val name:String) extends Val(code) def getBusStopByCode(code:String):BusStopType = { return BusStop.withName(code).asInstanceOf[BusStop.BusStopType] } def getBusStopByName(name:String):BusStopType = { for(busStop <- BusStop.values) { val bs = busStop.asInstanceOf[BusStopType] if(name == bs.name) { return bs } } return null } def names:SortedSet[String] = { for(busStop <- BusStop.values) yield busStop.asInstanceOf[BusStopType].name } protected final def BusStopValue(code:String, name:String):BusStopType = { return BusStopType(code, name) } val AACHENERSTR = BusStopValue("aache", "Aachener Str.") val ADASTR = BusStopValue("adast", "Adastr.") val ADOLPHKOLPINGSTR = BusStopValue("kolpi", "Adolph-Kolping-Str.") val AGROBSTR = BusStopValue("agrob", "Agrobstr.") val ALBEROWEG = BusStopValue("alber", "Alberoweg") val ALTEROLLBAHN = BusStopValue("rollb", "Alte Rollbahn") val ALTENHEIMHAERENWIES = BusStopValue("ALH", "Altenheim Haerenwies") val ALTERORTSKERN = BusStopValue("ortsk", "Alter Ortskern") val AMBEUTELWEG = BusStopValue("beute", "Am Beutelweg") val AMBILDSTOCK = BusStopValue("bilds", "Am Bildstock") val AMHERRENBRUENNCHEN = BusStopValue("hbrue", "Am Herrenbruennchen") val AMKIEWELSBERG = BusStopValue("kiewe", "Am Kiewelsberg") val AMMARIAHOF = BusStopValue("MAH", "Am Mariahof") val AMMOSELKAI = BusStopValue("mosel", "Am Moselkai") val AMMUEHLENTEICH = BusStopValue("teich", "Am Muehlenteich") val AMSANDBACH = BusStopValue("sandb", "Am Sandbach") val AMWASSERTURM = BusStopValue("wturm", "Am Wasserturm") val AMWEIDENGRABENANFANG = BusStopValue("wgran", "Am Weidengraben Anfang") val AMWEIDENGRABENENDE = BusStopValue("WGR", "Am Weidengraben Ende") val AMWEIDENGRABENMITTE = BusStopValue("wgrmi", "Am Weidengraben Mitte") val AMWISSENSCHAFTSPARK = BusStopValue("wpark", "Am Wissenschaftspark") val AMALTENFLUGPLATZ = BusStopValue("flugp", "Am alten Flugplatz") val AMPHITHEATER = BusStopValue("amphi", "Amphitheater") val ANDERFELDPORT = BusStopValue("feldp", "An der Feldport") val ANDERMAESWIES = BusStopValue("maesw", "An der Maeswies") val ANDERPFERDSWEIDE = BusStopValue("weide", "An der Pferdsweide") val ANDERSCHULE = BusStopValue("TRS", "An der Schule") val ANDERZIEGELE = BusStopValue("ziege", "An der Ziegele") val ANDREASSTR = BusStopValue("taran", "Andreasstr.") val ANHEIERSTR = BusStopValue("anhei", "Anheierstr.") val ARENATRIER = BusStopValue("arena", "Arena Trier") val ARNOLDISTR = BusStopValue("arnol", "Arnoldistr.") val AUFMOHRBUESCH = BusStopValue("mohrb", "Auf Mohrbuesch") val AUFDEMKIRCHSPIEL = BusStopValue("kirch", "Auf dem Kirchspiel") val AUFDERBAUSCH = BusStopValue("BAU", "Auf der Bausch") val AUFDERHILL = BusStopValue("HIL", "Auf der Hill") val AUFDERJUENG = BusStopValue("jueng", "Auf der Jueng") val AUGUSTASTR = BusStopValue("augus", "Augustastr.") val AVELERHOF = BusStopValue("avhof", "Avelerhof") val BALDUINSBRUNNENHBF = BusStopValue("baldu", "Balduinsbrunnen/HBF") val BARBARATHERMEN = BusStopValue("barth", "Barbarathermen") val BARBARAUFERROEMERBRUECKE = BusStopValue("baruf", "Barbaraufer/Roemerbruecke") val BEHRINGSTR = BusStopValue("behri", "Behringstr.") val BENEDIKTINERSTR = BusStopValue("bened", "Benediktinerstr.") val BERGSTR = BusStopValue("bergs", "Bergstr.") val BERNHARDSTR = BusStopValue("bernh", "Bernhardstr.") val BERTAR = BusStopValue("berta", "Bertar") val BETTEMBURGSTR = 
BusStopValue("bette", "Bettemburgstr.") val BIEWERNORD = BusStopValue("bnord", "Biewer-Nord") val BIEWERSUED = BusStopValue("bsued", "Biewer-Sued") val BITBURGERSTR = BusStopValue("bitbu", "Bitburger Str.") val BLUECHERSTR = BusStopValue("bluec", "Bluecherstr.") val BLUETENWEG = BusStopValue("bluet", "Bluetenweg") val BONHOCHWALDSTR = BusStopValue("bohoc", "Bon-Hochwaldstr.") val BONNEUSTR = BusStopValue("boneu", "Bon-Neustr.") val BONTALSPERRENBLICK = BusStopValue("BON", "Bon-Talsperrenblick") val BONIFATIUSSTR = BusStopValue("bonif", "Bonifatiusstr.") val BONNERSTR = BusStopValue("bst", "Bonner Str.") val BRETTENBAC = BusStopValue("brett", "Brettenbac") val BRUCHHAUSENSTR = BusStopValue("bhaus", "Bruchhausenstr.") val BUCHENWEG = BusStopValue("buche", "Buchenweg") val BURGMUEHLENSTR = BusStopValue("burgm", "Burgmuehlenstr.") val CASPAROLEVIANSTR = BusStopValue("caspa", "Caspar-Olevian-Str.") val CASTELFORT = BusStopValue("CAS", "Castelfort") val CHARLOTTENSTR = BusStopValue("charl", "Charlottenstr") val CLARAVIEBIGSTR = BusStopValue("viebi", "Clara-Viebig-Str") val CUSANUSSTR = BusStopValue("cusan", "Cusanusstr") val DEUTSCHHERRENSTR = BusStopValue("deuts", "Deutschherrenstr") val DIEDENHOFENERSTR = BusStopValue("diede", "Diedenhofener Str.") val DOMAENENSTR = BusStopValue("domae", "Domaenenstr") val DONAUSTR = BusStopValue("donau", "Donaustr") val EHRANGBAHNHOF = BusStopValue("egbhf", "Ehrang-Bahnhof") val EHRANGHINTEREHEIDE = BusStopValue("EGH", "Ehrang-Hintere Heide") val EHRANGSCHULE = BusStopValue("egsch", "Ehrang-Schule") val EHRANGSPORTPLATZ = BusStopValue("sport", "Ehrang-Sportplatz") val EHRANGUNTERFUEHRUNG = BusStopValue("EGU", "Ehrang-Unterfuehrung") val EHRANGERSTR = BusStopValue("egstr", "Ehranger Str") val EISENBAHNSTR = BusStopValue("eisen", "Eisenbahnstr") val EITELSBACHMITTE = BusStopValue("eitel", "Eitelsbach Mitte") val EURENFRIEDHOF = BusStopValue("EUF", "Euren-Friedhof") val EURENHELENENBRUNNEN = BusStopValue("EUH", "Euren-Helenenbrunnen") val EURENLUSTEINBSTR = BusStopValue("EUL", "Euren-Lu.Steinb.Str") val HOCHSCHULETRIER = BusStopValue("HST", "Hochschule Trier") val FANDELBOR = BusStopValue("fande", "Fandelbor") val FELDSTRMUTTERHAUS = BusStopValue("felds", "Feldstr./Mutterhaus") val FEYENGRAFSCHAFT = BusStopValue("FEY", "Feyen-Grafschaft") val FILSCHMITTE = BusStopValue("fimit", "Filsch Mitte") val FILSCHERHAEUSCHE = BusStopValue("FIL", "Filscher Haeusche") val FISCHWEG = BusStopValue("fiweg", "Fischweg") val FRANZBUSSSTR = BusStopValue("fbuss", "Franz-Buss-Str.") val FRANZGEORGSTR = BusStopValue("fgeor", "Franz-Georg-Str.") val FREIHERRVSTEINSTR = BusStopValue("freih", "Freiherr-v.Stein-Str.") val FRIEDHOFSTR = BusStopValue("fried", "Friedhofstr") val FRITZQUANTSTR = BusStopValue("quant", "Fritz-Quant-Str") val FRITZVONWILLESTR = BusStopValue("wille", "Fritz-von-Wille-Str") val FROEBELSTR = BusStopValue("froeb", "Froebelstr") val FROEHLICHERSTR = BusStopValue("froeh", "Froehlicherstr") val GAMBRINUSSTR = BusStopValue("gambr", "Gambrinusstr") val GARTENFELDSTR = BusStopValue("gfeld", "Gartenfeldstr") val GEORGSTR = BusStopValue("georg", "Georgstr") val GILBERTSTR = BusStopValue("GIL", "Gilbertstr") val GOTTBILLSTR = BusStopValue("VB", "Gottbillstr") val GRAFREGINARSTR = BusStopValue("grafr", "Graf-Reginar-Str") val GUSIMBILSEN = BusStopValue("gubil", "Gus-Im Bilsen") val GUSIMFLUERCHE = BusStopValue("guflu", "Gus-Im Fluerche") val GUSLAERCHENWEG = BusStopValue("gulae", "Gus-Laerchenweg") val GUSLINDENSTR = BusStopValue("gulin", "Gus-Lindenstr") val GUSROMIK = 
BusStopValue("gurom", "Gus-Romik") val GUSRUWERSTR = BusStopValue("guruw", "Gus-Ruwerstr") val GUSSCHULE = BusStopValue("gusch", "Gus-Schule") val GUSSOMMERAUERSTR = BusStopValue("gusom", "Gus-Sommerauer Str") val GUSTRIERERSTR = BusStopValue("GUS", "Gus-Trierer Str") val GUTENBERGSTR = BusStopValue("ZEG", "Gutenbergstr") val GUTWEILERIMBOORFELD = BusStopValue("GUT", "Gutweiler-Im Boorfeld") val GUTWEILERKIRCHE = BusStopValue("gtkir", "Gutweiler-Kirche") val HAENDELSTR = BusStopValue("haend", "Haendelstr") val HAFENSTR = BusStopValue("hafst", "Hafenstr") val HANDWERKSKAMMER = BusStopValue("HWK", "Handwerkskammer") val HANSBOECKLERALLE = BusStopValue("hansb", "Hans-Boeckler-Alle") val HANSMSCHLEYERSTR = BusStopValue("hansm", "Hans-M.Schleyer-Str") val HAUPTBAHNHOF = BusStopValue("HBF", "Hauptbahnhof") val HAUPTFRIEDHOF = BusStopValue("haupt", "Hauptfriedhof") val HEIDESTUFE = BusStopValue("heist", "Heidestufe") val HEILIGKTREVERERST = BusStopValue("HKT", "Heiligk.-Trevererst") val HELENENSTR = BusStopValue("helen", "Helenenstr") val HINZENBURGMITTE = BusStopValue("HIN", "Hinzenburg Mitte") val HOCHWALDSTR = BusStopValue("hochw", "Hochwaldstr") val HOCKWEILERMITTE = BusStopValue("HOK", "Hockweiler Mitte") val HOEHENFRIEDHOF = BusStopValue("hofri", "Hoehenfriedhof") val HOFBER = BusStopValue("hofbe", "Hofber") val HOLZERATHMITTE = BusStopValue("holze", "Holzerath Mitte") val HONTHEIMSTR = BusStopValue("honth", "Hontheimstr") val HOSPITALSMUEHL = BusStopValue("hospi", "Hospitalsmuehl") val HUNSRUECKSTR = BusStopValue("hunsr", "Hunsrueckstr") val IGELAMFEILENKREU = BusStopValue("igfei", "Igel-Am Feilenkreu") val IGELAMRODERKAM = BusStopValue("igrod", "Igel-Am Roderkam") val IGELAMSCHLEIDBER = BusStopValue("igsbe", "Igel-Am Schleidber") val IGELIGELERSAEUL = BusStopValue("igsae", "Igel-Igeler Saeul") val IGELKIRCHE = BusStopValue("igkir", "Igel-Kirche") val IGELLAUBENSTR = BusStopValue("iglau", "Igel-Laubenstr") val IGELMOSELSTR = BusStopValue("IGL", "Igel-Moselstr") val IGELROEMERSTR = BusStopValue("igroe", "Igel-Roemerstr") val IGELSCHAUINSLAN = BusStopValue("igsch", "Igel-Schauinslan") val IGELWALDSTR = BusStopValue("igwa", "Igel-Waldstr") val IMAVELERTAL = BusStopValue("avtal", "Im Avelertal") val IMHOPFENGARTEN = BusStopValue("HOP", "Im Hopfengarten") val IMPAULINSGARTEN = BusStopValue("RUP", "Im Paulinsgarten") val IMPFLANZGARTEN = BusStopValue("pflan", "Im Pflanzgarten") val IMSCHAMMA = BusStopValue("scham", "Im Schamma") val IMSIEBENBOR = BusStopValue("siebe", "Im Siebenbor") val INDUSTRIEGEBZEWEN = BusStopValue("IZE", "Industriegeb-Zewen") val IRSCHABZWEI = BusStopValue("irabz", "Irsch Abzwei") val IRSCHHOCKWEILERSTR = BusStopValue("IRH", "Irsch-Hockweilerstr") val IRSCHPROPSTE = BusStopValue("IRP", "Irsch-Propste") val IRSCHERBERG = BusStopValue("iberg", "Irscher Berg") val IRSCHERHOF = BusStopValue("irhof", "Irscher Hof") val IRSCHERMUEHLE = BusStopValue("irmue", "Irscher Muehle") val IRSCHERSTR = BusStopValue("irstr", "Irscher Str.") val ISBRUNNEN = BusStopValue("isbru", "Is-Brunnen") val ISBRUNNENSTR = BusStopValue("isbst", "Is-Brunnenstr") val ISZURKIESGRUBE = BusStopValue("iskie", "Is-Zur Kiesgrube") val JAKOBKNEIPSTR = BusStopValue("KEJ", "Jakob-Kneip-Str") val JANUARIUSZICKSTR = BusStopValue("jzick", "Januarius-Zick-Str") val JOSEPHHAYDNSTR = BusStopValue("haydn", "Joseph-Haydn-Str") val JUNGENWALD = BusStopValue("jwald", "Jungenwald") val KAISERTHERMEN = BusStopValue("KTH", "Kaiserthermen") val KANTSTR = BusStopValue("kants", "Kantstr") val KANZELSTR = BusStopValue("ZEK", 
"Kanzelstr") val KARLMARXHAUS = BusStopValue("KMH", "Karl-Marx-Haus") val KARLSWEG = BusStopValue("karls", "Karlsweg") val KARRENBACH = BusStopValue("karre", "Karrenbach") val KASBAHNHOF = BusStopValue("kabhf", "Kas-Bahnhof") val KASBAHNHOFSTR = BusStopValue("kabst", "Kas-Bahnhofstr") val KASGEMEINDEPLAT = BusStopValue("KAG", "Kas-Gemeindeplat") val KASHAUSTIMPER = BusStopValue("katim", "Kas-Haus Timper") val KASSCHULE = BusStopValue("kasch", "Kas-Schule") val KASSPORTPLAT = BusStopValue("kaspo", "Kas-Sportplat") val KASWASSERHAEUSCHE = BusStopValue("kawas", "Kas-Wasserhaeusche") val KENNERWEG = BusStopValue("kenne", "Kenner Weg") val KERNSCHHOEHENWEG1 = BusStopValue("hoeh1", "Kernsch.Hoehenweg 1") val KERNSCHHOEHENWEG2 = BusStopValue("hoeh2", "Kernsch.Hoehenweg 2") val KERNSCHEIDKIRCHE = BusStopValue("KER", "Kernscheid-Kirche") val KEUNEGRUNDSCHULE = BusStopValue("KEU", "Keune Grundschule") val KLEEBURGERWEG = BusStopValue("OLK", "Kleeburger Weg") val KOHLENSTR = BusStopValue("kohle", "Kohlenstr") val KOLONNENWEG = BusStopValue("KOL", "Kolonnenweg") val KONRADADENAUERBRUECKE = BusStopValue("adebr", "Konrad-Adenauer-Bruecke") val KONSTANTINBASILIK = BusStopValue("KON", "Konstantin Basilik") val KONZERSTR = BusStopValue("konze", "Konzer Str") val KORLNEUSTR = BusStopValue("koneu", "Korl-Neustr") val KORLZUMSTEINBRUCH = BusStopValue("koste", "Korl-Zum Steinbruch") val KREUZFLUR = BusStopValue("kreuz", "Kreuzflur") val KUERENZERSTR = BusStopValue("KUE", "Kuerenzer Str") val KUNOSTAPELSTR = BusStopValue("kunos", "Kuno-Stapel-Str") val KURFUERSTENSTR = BusStopValue("kurfu", "Kurfuerstenstr") val KYRIANDERSTR = BusStopValue("kyria", "Kyrianderstr") val LERHARDRINGAUSST = BusStopValue("ludwi", "L.Erhard-Ring,Ausst") val LAMBERTISTR = BusStopValue("lambe", "Lambertistr") val LANIMBUNGER = BusStopValue("lgbun", "Lan-Im Bunger") val LANKIRCH = BusStopValue("lgkir", "Lan-Kirch") val LANLOEWENERMUEHLE = BusStopValue("lgloe", "Lan-Loewener Muehle") val LANMESENICHERSTR = BusStopValue("lgmes", "Lan-Mesenicher Str") val LANNEUSTR = BusStopValue("lgneu", "Lan-Neustr") val LANWASSERBILLIGERB = BusStopValue("lgwas", "Lan-Wasserbilligerb") val LANGFLUR = BusStopValue("langf", "Langflur") val LAYWEG = BusStopValue("EGL", "Layweg") val LEBENSHILFE = BusStopValue("LWT", "Lebenshilfe") val LENUSMARSSTR = BusStopValue("lenus", "Lenus-Mars-Str") val LEVELINGSTR = BusStopValue("level", "Levelingstr") val LIERSBERGHEINTZHOF = BusStopValue("lihei", "Liersberg-Heintzhof") val LIERSBERGKIRCHE = BusStopValue("LIE", "Liersberg-Kirche") val LOEBSTR = BusStopValue("loebs", "Loebstr") val LUDWIGSIMONSTR = BusStopValue("simon", "Ludwig-Simon-Str") val LUXEMBURGERSTR = BusStopValue("luxem", "Luxemburger Str") val MAARSTR = BusStopValue("maars", "Maarstr") val MAEUSHECKERSCHULE = BusStopValue("MHS", "Maeusheckerschule") val MAEUSHECKERWE = BusStopValue("MHW", "Maeusheckerwe") val MAGNERICHSTR = BusStopValue("magne", "Magnerichstr") val MARIENHOLZSTR = BusStopValue("mholz", "Marienholzstr") val MARIENKRANKENHAUS = BusStopValue("mkrhs", "Marienkrankenhaus") val MARIENSAEULE = BusStopValue("saeul", "Mariensaeule") val MARKUSBER = BusStopValue("mberg", "Markusber") val MARKUSSTR = BusStopValue("marku", "Markusstr") val MARTINSCHUNCKSTR = BusStopValue("schun", "Martin-Schunck-Str") val MARTINSTR = BusStopValue("marti", "Martinstr") val MAXIMINERACH = BusStopValue("maxim", "Maximinerach") val MEILENSTR = BusStopValue("meile", "Meilenstr") val MERTABZWEITELSBACH = BusStopValue("OVB", "Mert-Abzw.Eitelsbach") val MERTBERGSTR = 
BusStopValue("meber", "Mert-Bergstr") val MERTDORFBRUNNEN = BusStopValue("medor", "Mert-Dorfbrunnen") val MERTEICHGRABEN = BusStopValue("MEE", "Mert-Eichgraben") val MERTGRUENHAUS = BusStopValue("megru", "Mert-Gruenhaus") val MERTSTADIONSTR = BusStopValue("mesta", "Mert-Stadionstr") val MERTZURFESTUNG = BusStopValue("mefes", "Mert-Zur Festung") val MESENICHKIRCHE = BusStopValue("mskir", "Mesenich-Kirche") val MESENICHTRIERERSTR = BusStopValue("mstri", "Mesenich-Trierer Str.") val MESSEPARKP_PLUS = BusStopValue("MES", "Messepark P+") val METZERALLE = BusStopValue("MEZ", "Metzer Alle") val MOHRENKOP = BusStopValue("MOR", "Mohrenkop") val MONAISERSTR = BusStopValue("monai", "Monaiser Str") val MORSCHEIDMITT = BusStopValue("MOS", "Morscheid Mitt") val MOSELSTADIO = BusStopValue("stadio", "Moselstadio") val MUEHLENSTR = BusStopValue("muehl", "Muehlenstr") val MUSTORSTR = BusStopValue("musto", "Mustorstr") val MZKIRCHE = BusStopValue("MZK", "Mz-Kirche") val MZWINTERSDORFERSTR = BusStopValue("mzwin", "Mz-Wintersdorfer Str") val NELLSPARK = BusStopValue("NPA", "Nells Park") val NELLSPARKPARKSTR = BusStopValue("npapa", "Nells Park/Parkstr") val NELLSTR = BusStopValue("nells", "Nellstr") val NIEDERKIRCHERSTR = BusStopValue("nieder", "Niederkircher Str") val NIEDERSTR = BusStopValue("niest", "Niederstr") val NIKOLAUSKOCHPLATZ = BusStopValue("NIK", "Nikolaus-Koch-Platz") val NORDALLEEKRANKENHAUS = BusStopValue("norda", "Nordallee/Krankenhaus") val NORDBAD = BusStopValue("nordb", "Nordbad") val P_AND_R_TRIERNORD = BusStopValue("prnor", "P+R Trier Nord") val PALLIENERSTR = BusStopValue("palli", "Palliener Str") val PELLINGERSTR = BusStopValue("pelli", "Pellinger Str") val PETERROTHPLATZ = BusStopValue("PRP", "Peter-Roth-Platz") val PETERWUSTSTR = BusStopValue("pwust", "Peter-Wust-Str") val PETRISBERG = BusStopValue("PET", "Petrisberg") val PETRISBERGAUSSICH = BusStopValue("aussi", "Petrisberg-Aussich") val PFALZELBAHNHOF = BusStopValue("pfbhf", "Pfalzel-Bahnhof") val PFALZELELTZSTR = BusStopValue("PFA", "Pfalzel-Eltzstr") val PFERDEMARK = BusStopValue("markt", "Pferdemark") val PHILOSOPHENWEG = BusStopValue("philo", "Philosophenweg") val PLUBAHNHOF = BusStopValue("plbhf", "Plu-Bahnhof") val PLUBAHNHOFSTR = BusStopValue("plbst", "Plu-Bahnhofstr") val PLUGUSTERATHERSTR = BusStopValue("plgus", "Plu-Gusterather Str") val PLUINDWEIDENWIESE = BusStopValue("plwei", "Plu-In d. 
Weidenwiese") val PLURUWERSTR = BusStopValue("PLU", "Plu-Ruwerstr") val PORTANIGRA = BusStopValue("PN", "Porta Nigra") val QUINTSCHLOSS = BusStopValue("QUI", "Quint-Schloss") val QUINTERSTR = BusStopValue("quist", "Quinter Str") val RATHAUSSTADTTHEATER = BusStopValue("ratha", "Rathaus/Stadttheater") val REMIGIUSSTR = BusStopValue("remig", "Remigiusstr") val RETZGRUBENWEG = BusStopValue("retzg", "Retzgrubenweg") val RINGSTR = BusStopValue("rings", "Ringstr") val RIVBAUSBACH = BusStopValue("ribau", "Riv-Bausbach") val RIVMORSCHEIDERST = BusStopValue("rimor", "Riv-Morscheider Str.") val RIVERISMITTE = BusStopValue("rimit", "Riveris Mitte") val ROEMERSTR = BusStopValue("roest", "Roemerstr") val ROTBACHSTR = BusStopValue("ROT", "Rotbachstr") val ROTHILDISSTR = BusStopValue("rothi", "Rothildisstr") val RUBENSTR = BusStopValue("ruben", "Rubenstr") val RUDOLFDIESELSTR = BusStopValue("diese", "Rudolf-Diesel-Str") val RUWERBAHNHOF = BusStopValue("rubhf", "Ruwer-Bahnhof") val RUWERSPORTPLATZ = BusStopValue("RUS", "Ruwer-Sportplatz") val RUWERERSTR = BusStopValue("rustr", "Ruwererstr") val SWTSTADTWERKETRIER = BusStopValue("swtri", "SWT Stadtwerke Trier") val SCHEFFELSTR = BusStopValue("schef", "Scheffelstr") val SCHIFFSTR = BusStopValue("schif", "Schiffstr") val SCHLOSSSTR = BusStopValue("schlo", "Schlossstr") val SCHOENBORNSTR = BusStopValue("schoe", "Schoenbornstr") val SCHUETZENSTR = BusStopValue("schue", "Schuetzenstr") val SCHWARZERWEG = BusStopValue("schwa", "Schwarzer Weg") val SDALTENWEG = BusStopValue("sdalt", "Sd-Altenweg") val SDHAUPTSTR = BusStopValue("sdhau", "Sd-Hauptstr") val SDKIRCHE = BusStopValue("sdkir", "Sd-Kirche") val SDPLUWIGERHAMME = BusStopValue("plham", "Sd-Pluwigerhammer") val SEIFERSTR = BusStopValue("seife", "Seiferstr") val SPIROSTR = BusStopValue("spiro", "Spirostr") val STMATTHIA = BusStopValue("MAT", "St. Matthia") val STMEDAR = BusStopValue("MED", "St. Medar") val STPAULI = BusStopValue("pauli", "St. Pauli") val STURBA = BusStopValue("urban", "St. 
Urba") val STANNASTR = BusStopValue("annas", "St.-Anna-Str") val STMATTHIASAULSTR = BusStopValue("matau", "St.-Matthias-Aulstr") val STMATTHIASKIRCH = BusStopValue("matki", "St.-Matthias-Kirch") val STMERGENERSTR = BusStopValue("merge", "St.-Mergener-Str") val STADTBIBLIOTHE = BusStopValue("bibli", "Stadtbibliothe") val STEFANANDRESSTR = BusStopValue("andre", "Stefan-Andres-Str") val STEINBRUECKSTR = BusStopValue("stein", "Steinbrueckstr") val STEINSWEG = BusStopValue("stweg", "Steinsweg") val STRASSBURGERALLE = BusStopValue("strass", "Strassburger Alle") val SUEDALLEEKAISERSTR = BusStopValue("SUE", "Suedallee/Kaiserstr") val SUEDBAHNHOF = BusStopValue("SBF", "Suedbahnhof") val SUEDFRIEDHOF = BusStopValue("suedf", "Suedfriedhof") val SWALTESWEINHAU = BusStopValue("swwei", "Sw-Altes Weinhau") val SWBAHNHOFSTR = BusStopValue("swbah", "Sw-Bahnhofstr") val SWBERTRADASTR = BusStopValue("swber", "Sw-Bertradastr") val SWBRUNNE = BusStopValue("swbru", "Sw-Brunne") val SWERMESGRABE = BusStopValue("swerm", "Sw-Ermesgrabe") val SWLANGFUHRSTR = BusStopValue("SWE", "Sw-Langfuhrstr") val TARFKCARSTENSST = BusStopValue("TAK", "Tarf.-K.Carstens-St") val TARFLERHARDRIN = BusStopValue("TAL", "Tarf.-L.Erhard-Rin") val TARFORSTERSTR = BusStopValue("tarst", "Tarforster Str") val TAUBENBER = BusStopValue("taube", "Taubenber") val TEMPELHERRENSTR = BusStopValue("teher", "Tempelherrenstr") val TEMPELWE = BusStopValue("teweg", "Tempelwe") val THEODORHEUSSALLE = BusStopValue("THA", "Theodor-Heuss-Alle") val THYRSUSSTR = BusStopValue("thyrs", "Thyrsusstr") val TIERGARTE = BusStopValue("OLT", "Tiergarte") val TOEPFERSTR = BusStopValue("toepf", "Toepferstr") val TREBETASTR = BusStopValue("trebe", "Trebetastr") val TREVIRS = BusStopValue("TRV", "Treviris") val TRIERGALERIE = BusStopValue("galer", "Trier Galerie") val TRIERERHAFEN = BusStopValue("hafen", "Trierer Hafen") val TRIERWEILERWEG = BusStopValue("TWW", "Trierweilerweg") val TRIMHOFAUGUSTSTR = BusStopValue("TRH", "Trim.Hof-August.str") val UNIVERSITAET = BusStopValue("UNI", "Universitaet") val UNIVERSITAETCAMPUS = BusStopValue("UC2", "Universitaet Campus") val UNIVERSITAETHAUPTG = BusStopValue("UNH", "Universitaet Hauptg") val UNIVERSITAETMENSA = BusStopValue("UNM", "Universitaet Mensa") val UNIVERSITAETPARKPLATZ = BusStopValue("unipa", "Universitaet Parkplatz") val UNIVERSITAETSUED = BusStopValue("UNS", "Universitaet Sued") val VBREALMARK = BusStopValue("vbrea", "VB-Real-Mark") val VBTESTPLAT = BusStopValue("vbtes", "VB-Testplat") val VBTOR = BusStopValue("vbto3", "VB-Tor ") val VBZENTRAL = BusStopValue("vbzen", "VB-Zentral") val VILLAREVERCHO = BusStopValue("villa", "Villa Revercho") val VORDEREHEID = BusStopValue("vheid", "Vordere Heid") val WALDAMSPORTPLAT = BusStopValue("WAL", "Wald-Am Sportplat") val WALDAUFDEMFLUERCHE = BusStopValue("waflu", "Wald-Auf dem Fluerche") val WALDBAHNHOF = BusStopValue("wabhf", "Wald-Bahnhof") val WALDGEMEINDEPLAT = BusStopValue("wagem", "Wald-Gemeindeplat") val WALDGEWERBEGEBIE = BusStopValue("wagew", "Wald-Gewerbegebie") val WALDINDERACH = BusStopValue("waach", "Wald-In der Ach") val WALDINDERLA = BusStopValue("walay", "Wald-In der La") val WALDOBEREKIRCHST = BusStopValue("wakir", "Wald-Obere Kirchst") val WALDPAETSCHERWE = BusStopValue("wapae", "Wald-Paetscher We") val WALDROSENBUNGER = BusStopValue("waros", "Wald-Rosenbunger") val WALDSCHUL = BusStopValue("wasch", "Wald-Schul") val WALDWEINBERGSWEG = BusStopValue("wawei", "Wald-Weinbergsweg") val WALDZOLLWE = BusStopValue("wazol", "Wald-Zollwe") val WALDSTR = 
BusStopValue("walds", "Waldstr") val WALLENBACHSTR = BusStopValue("walle", "Wallenbachstr") val WASSERBILLIGERSTR = BusStopValue("wbill", "Wasserbilliger Str") val WASSERWE = BusStopValue("waweg", "Wasserwe") val WEISMARKPFAHLWEIHE = BusStopValue("WEI", "Weismark-Pfahlweihe") val WENZELBACHSTR = BusStopValue("wenze", "Wenzelbachstr") val WERDINGSTR = BusStopValue("werdi", "Werdingstr") val WESTBAHNHO = BusStopValue("WBF", "Westbahnho") val WILHLEUSCHNERSTR = BusStopValue("WLE", "Wilh.-Leuschner-Str") val WINKIRCH = BusStopValue("WDK", "Win-Kirch") val WINLINDENSTR = BusStopValue("wdlin", "Win-Lindenstr") val WINWEINBERGSWE = BusStopValue("wdwei", "Win-Weinbergswe") val WISPORTSTR = BusStopValue("wispo", "Wisportstr") val WOLFSBER = BusStopValue("WOB", "Wolfsber") val WOLKERSTR = BusStopValue("wolke", "Wolkerstr") val ZEWENFRIEDHO = BusStopValue("ZEF", "Zewen-Friedho") val ZUMHELLBER = BusStopValue("hellb", "Zum Hellber") val ZUMROEMERSPRUDE = BusStopValue("sprud", "Zum Roemersprude") val ZURLAUBENERUFE = BusStopValue("ZUR", "Zurlaubener Ufe") val ZURMAIENERSTRNORD = BusStopValue("zurma", "Zurmaiener Str.Nord") }
kasoki/swt-realtime
src/main/scala/de/kasoki/swtrealtime/BusStop.scala
Scala
mit
24,940
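The enumeration above maps SWT Trier bus-stop codes to display names. A minimal usage sketch, assuming the BusStop object above is on the classpath; the lookup helpers and the "HBF"/"PN" codes are taken directly from it, while the example object itself is illustrative:

import de.kasoki.swtrealtime.BusStop

object BusStopLookupExample extends App {
  // Look up a stop by its short code ("HBF" is defined above as Hauptbahnhof).
  val hbf = BusStop.getBusStopByCode("HBF")
  println(hbf.name) // Hauptbahnhof

  // Reverse lookup by display name; returns null when nothing matches.
  val porta = BusStop.getBusStopByName("Porta Nigra")
  println(porta.code) // PN

  // All stop names as a SortedSet, so iteration is alphabetical.
  BusStop.names.take(5).foreach(println)
}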
package infra.piece.core

import scala.concurrent.{Future, ExecutionContext}
import java.io.InputStream
import play.api.mvc.{SimpleResult, Result}

/**
 * @author alari ([email protected])
 * @since 07.05.14 13:47
 */
trait FileStorage {
  def store(file: Pieces.File, userId: String)(implicit ec: ExecutionContext): Future[String]

  def store(is: InputStream, filename: String, contentType: Option[String], userId: String)(implicit ec: ExecutionContext): Future[String]

  def url(fileId: String): String

  def download(fileId: String)(implicit ec: ExecutionContext): Future[SimpleResult]
}
alari/play-content
module-code/app/infra/piece/core/FileStorage.scala
Scala
mit
597
/*
 * Copyright 2001-2012 Artima, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.scalatest

/**
 * Arguments bundle passed to four of ScalaTest's lifecycle methods: <code>run</code>, <code>runNestedSuites</code>,
 * <code>runTests</code>, and <code>runTest</code>.
 *
 * <p>
 * The signatures of these methods, defined in trait <a href="Suite.html"><code>Suite</code></a>, are:
 * </p>
 *
 * <pre>
 * def run(testName: Option[String], args: Args)
 * def runNestedSuites(args: Args)
 * def runTests(testName: Option[String], args: Args)
 * def runTest(testName: String, args: Args)
 * </pre>
 *
 * <p>
 * The purpose of bundling these arguments into an <code>Args</code> object instead of passing them in individually is to make the signature
 * of these four lifecycle methods easier to read, write, and remember, and to make the methods more pleasant to override in user code.
 * </p>
 *
 * @param reporter the <code>Reporter</code> to which results will be reported
 * @param stopper the <code>Stopper</code> that will be consulted to determine whether to stop execution early.
 * @param filter a <code>Filter</code> with which to filter tests based on their tags
 * @param configMap a <code>Map</code> of key-value pairs that can be used by the executing <code>Suite</code> of tests.
 * @param distributor an optional <code>Distributor</code>, into which to put nested <code>Suite</code>s to be executed
 *   by another entity, such as concurrently by a pool of threads. If <code>None</code>, nested <code>Suite</code>s will be executed sequentially.
 * @param tracker a <code>Tracker</code> tracking <code>Ordinal</code>s being fired by the current thread.
 * @param chosenStyles a (possibly empty) <code>Set</code> of <code>String</code>s specifying the run's <a href="tools/Runner$.html#specifyingChosenStyles"><em>chosen styles</em></a>
 * @param runTestInNewInstance a flag used to pass information between run methods
 *   in <a href="OneInstancePerTest.html"><code>OneInstancePerTest</code></a> and <a href="ParallelTestExecution.html"><code>ParallelTestExecution</code></a>.
 * @param distributedTestSorter an optional <a href="DistributedTestSorter.html"><code>DistributedTestSorter</code></a> used by <a href="ParallelTestExecution.html"><code>ParallelTestExecution</code></a> to sort the events
 *   for the parallel-executed tests of one suite back into sequential order on the fly, with a timeout in case a test takes too long to complete
 * @param distributedSuiteSorter an optional <a href="DistributedSuiteSorter.html"><code>DistributedSuiteSorter</code></a> used by <a href="ParallelTestExecution.html"><code>ParallelTestExecution</code></a> to ensure the events
 *   for the parallel-executed suites are sorted back into sequential order, with a timeout in case a suite takes too long to complete, even when tests are executed in parallel
 *
 * @throws NullPointerException if any passed parameter is <code>null</code>.
 */
case class Args(
  reporter: Reporter,
  stopper: Stopper = Stopper.default,
  filter: Filter = Filter.default,
  configMap: Map[String, Any] = Map.empty,
  distributor: Option[Distributor] = None,
  tracker: Tracker = Tracker.default,
  chosenStyles: Set[String] = Set.empty,
  runTestInNewInstance: Boolean = false,
  distributedTestSorter: Option[DistributedTestSorter] = None,
  distributedSuiteSorter: Option[DistributedSuiteSorter] = None
) {
  if (reporter == null) throw new NullPointerException("reporter was null")
  if (stopper == null) throw new NullPointerException("stopper was null")
  if (filter == null) throw new NullPointerException("filter was null")
  if (configMap == null) throw new NullPointerException("configMap was null")
  if (distributor == null) throw new NullPointerException("distributor was null")
  if (tracker == null) throw new NullPointerException("tracker was null")
  if (chosenStyles == null) throw new NullPointerException("chosenStyles was null")
  if (distributedTestSorter == null) throw new NullPointerException("distributedTestSorter was null")
  if (distributedSuiteSorter == null) throw new NullPointerException("distributedSuiteSorter was null")
}
hubertp/scalatest
src/main/scala/org/scalatest/Args.scala
Scala
apache-2.0
4,786
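A sketch of constructing an Args and handing it to a suite. Only the Args fields come from the class above; the silent Reporter and the runAll helper are invented for illustration, and it assumes the ScalaTest Reporter trait of that era exposes a single apply(event) method:

import org.scalatest._

object RunWithArgsExample {
  // A Reporter that simply discards every event (placeholder for a real reporter).
  val silentReporter = new Reporter {
    def apply(event: events.Event): Unit = ()
  }

  // Only `reporter` is required; every other field falls back to its default
  // (Stopper.default, Filter.default, empty configMap, and so on).
  val args = Args(reporter = silentReporter)

  // Run all tests of a suite with these arguments.
  def runAll(suite: Suite): Unit = {
    suite.run(None, args)
  }
}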
package libjoe.roshi

import com.redis.RedisClient

case class RedisServerAddress(host: String, port: Int)

object RedisServerAddress {
  implicit def redisServerToClient(rs: RedisServerAddress): RedisClient =
    new RedisClient(rs.host, rs.port)
}
joekearney/roshi-scala
src/main/scala/libjoe/roshi/RedisServerAddress.scala
Scala
apache-2.0
245
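A short usage sketch of the implicit conversion defined above. It assumes a Redis server is reachable on localhost:6379 and that the scala-redis client used by the snippet is on the classpath; the key/value are arbitrary:

import libjoe.roshi.RedisServerAddress
import com.redis.RedisClient

object RedisAddressExample extends App {
  val address = RedisServerAddress("localhost", 6379)

  // The companion-object implicit turns the address into a connected client.
  val client: RedisClient = address
  client.set("greeting", "hello")
  println(client.get("greeting")) // Some(hello)
}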
/* __ *\\ ** ________ ___ / / ___ Scala API ** ** / __/ __// _ | / / / _ | (c) 2003-2010, LAMP/EPFL ** ** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ ** ** /____/\\___/_/ |_/____/_/ | | ** ** |/ ** \\* */ package scala.concurrent /** <p> * Library for using Pi-calculus concurrent primitives in * <a href="http://scala-lang.org/" target="_top">Scala</a>. As an * example, the definition of a two-place buffer using the <code>pilib</code> * library looks like: * </p><pre> * <b>def</b> Buffer[a](put: Chan[a], get: Chan[a]) { * <b>def</b> B0 { choice ( put * { x => B1(x) } ) } * <b>def</b> B1(x: a) { choice ( get(x) * B0, put * { y => B2(x, y) } ) } * <b>def</b> B2(x: a, y: a) { choice ( get(x) * B1(y) ) } * B0 * } * </pre> * * @see <a href="http://scala-lang.org/docu/papers.html" target="_top"> * PiLib: A Hosted Language for Pi-Calculus Style Concurrency</a> * @author Vincent Cremet, Martin Odersky * @version 1.0 */ object pilib { import TaskRunners.threadRunner //////////////////////////////// SPAWN ///////////////////////////////// /** * Run several processes in parallel using the following syntax: * <code>spawn &lt; p<sub>1</sub> | ... | p<sub>n</sub> &gt;</code> */ abstract class Spawn { def <(p: => Unit): Spawn def |(p: => Unit): Spawn def > : Unit } val spawn = new Spawn { //object spawn extends Spawn { // BUG ! def <(p: => Unit): Spawn = { scala.concurrent.ops.spawn(p); this } def |(p: => Unit): Spawn = { scala.concurrent.ops.spawn(p); this } def > : Unit = () } /////////////////////////// GUARDED PROCESSES ////////////////////////// /** Untyped channel. */ class UChan { /** Default log function. */ var log = (x: Any) => () } /** An untyped guarded process. * * @param n channel name * @param polarity input (true) or output (false) * @param v transmitted value * @param c continuation */ case class UGP(n: UChan, polarity: Boolean, v: Any, c: Any => Any) /** Typed guarded process. */ class GP[a](n: UChan, polarity: Boolean, v: Any, c: Any => a) { val untyped = UGP(n, polarity, v, c) } //////////////////////////////// CHANNELS ////////////////////////////// /** * Name on which one can emit, receive or that can be emitted or received * during a communication. */ class Chan[A] extends UChan with Function1[A, Product[A]] { var defaultValue: A = _ /** Creates an input guarded process. */ def input[B](c: A => B) = new GP(this, true, (), x => c(x.asInstanceOf[A])) /** Creates an input guarded process. */ def output[B](v: A, c: () => B) = new GP(this, false, v, x => c()) /** Blocking read. */ def read = { var res: A = defaultValue choice ( input(x => res = x) ) res } /** Blocking write. */ def write(x: A) = choice ( output(x, () => ()) ) /** Syntactic sugar for input. */ def *[B](f: A => B) = input(f) /** Syntactic sugar for output. */ def apply(v: A) = new Product(this, v) /** Attach a function to be evaluated at each communication event * on this channel. Replace previous attached function. */ def attach(f: A => Unit) = log = x => f(x.asInstanceOf[A]) } class Product[A](c: Chan[A], v: A) { def *[B](f: => B) = c.output(v, () => f) } /////////////////////// SUM OF GUARDED PROCESSES /////////////////////// case class Sum(gs: List[UGP]) { /** Continuation of the sum. */ var cont: () => Any = _ var initialized = false /** Block if not initialized otherwise continue with the * continuation. */ def continue = synchronized { if (!initialized) wait() cont() } /** Set the values of parameters and awake the sleeping sum. * * @param f ... 
*/ def set(f: () => Any) = synchronized { cont = f initialized = true notify() } } ///////////////////////////// COMMUNICATION //////////////////////////// private var sums: List[Sum] = Nil /** Test if two lists of guarded processes can communicate. * * @param gs1 ... * @param gs2 ... * @return ... */ private def matches(gs1: List[UGP], gs2: List[UGP]): Option[(() => Unit, () => Any, () => Any)] = (gs1, gs2) match { case (Nil, _) => None case (_, Nil) => None case (UGP(a1, d1, v1, c1) :: rest1, UGP(a2, d2, v2, c2) :: rest2) => if (a1 == a2 && d1 == !d2) Some(((() => if (d1) a1.log(v2) else a1.log(v1)), (() => c1(v2)), (() => c2(v1)))) else matches(gs1, rest2) match { case None => matches(rest1, gs2) case Some(t) => Some(t) } } /** Test if the given sum can react with one of the pending sums. * If yes then do the reaction otherwise append the sum at the end * of the pending sums. * * @param s1 ... * @param ss ... * @return ... */ private def compare(s1: Sum, ss: List[Sum]): List[Sum] = ss match { case Nil => ss ::: List(s1) case s2 :: rest => matches(s1.gs, s2.gs) match { case None => s2 :: compare(s1, rest) case Some((log, c1, c2)) => log() s1.set(c1) s2.set(c2) rest } } /** Pi-calculus non-deterministic choice. * * @param s ... * @return ... */ def choice[A](s: GP[A]*): A = { val sum = Sum(s.toList map { _.untyped }) synchronized { sums = compare(sum, sums) } (sum.continue).asInstanceOf[A] } }
cran/rkafkajars
java/scala/concurrent/pilib.scala
Scala
apache-2.0
5,955
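A minimal rendezvous sketch using the Chan and spawn primitives defined above, in the spirit of the two-place buffer example from the header comment. The example object, message text, and the trailing sleep are illustrative assumptions (the sleep is only a crude guard so the App does not exit before the exchange completes):

import scala.concurrent.pilib._

object PilibRendezvous extends App {
  val chan = new Chan[String]

  // Two processes run in parallel; write and read are the blocking helpers
  // defined on Chan above, so the two sides rendezvous on the channel.
  spawn < { chan.write("hello from pilib") } | { println(chan.read) } >

  // Crude wait so the main thread does not exit before the exchange happens.
  Thread.sleep(1000)
}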
/* * Copyright 2015 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.atlas.core.db import com.netflix.atlas.core.model.Block import com.netflix.atlas.core.model.CollectorStats import com.netflix.atlas.core.model.CollectorStatsBuilder import com.netflix.atlas.core.model.ConsolidationFunction import com.netflix.atlas.core.model.DataExpr import com.netflix.atlas.core.util.Math object AggregateCollector { def apply(expr: DataExpr): AggregateCollector = expr match { case by: DataExpr.GroupBy => new GroupByAggregateCollector(by) case _: DataExpr.All => new AllAggregateCollector case _: DataExpr.Sum => new SumAggregateCollector case _: DataExpr.Count => new SumAggregateCollector case _: DataExpr.Min => new MinAggregateCollector case _: DataExpr.Max => new MaxAggregateCollector case DataExpr.Head(e, _) => apply(e) case DataExpr.Consolidation(af, _) => apply(af) } } /** * Collector for computing an aggregate or set of aggregates from a set of metric buffers. */ trait AggregateCollector { /** Add `b` to the aggregate. */ def add(b: TimeSeriesBuffer) /** * Add a block to the aggregate directly. The underlying buffer must be using the same step size * as the block storage. */ def add( tags: Map[String, String], blocks: List[Block], aggr: Int, cf: ConsolidationFunction, multiple: Int, newBuffer: Map[String, String] => TimeSeriesBuffer): Int /** Returns the final set of aggregate buffers. */ def result: List[TimeSeriesBuffer] /** Statistics for this collector. 
*/ def stats: CollectorStats def newCollector(af: DataExpr.AggregateFunction): AggregateCollector = { AggregateCollector(af) } } abstract class SimpleAggregateCollector extends AggregateCollector { import java.util.{BitSet => JBitSet} var buffer = null.asInstanceOf[TimeSeriesBuffer] var valueMask = null.asInstanceOf[JBitSet] var valueMultiple = -1 var valueCount = 0 val statBuffer = new CollectorStatsBuilder protected def aggregate(b1: TimeSeriesBuffer, b2: TimeSeriesBuffer) def add(b: TimeSeriesBuffer) { statBuffer.updateInput(b.values.length) if (!b.isAllNaN) { valueCount = 1 if (buffer == null) { statBuffer.updateOutput(b.values.length) buffer = b.copy } else { aggregate(buffer, b) } } } def add( tags: Map[String, String], blocks: List[Block], aggr: Int, cf: ConsolidationFunction, multiple: Int, newBuffer: Map[String, String] => TimeSeriesBuffer): Int = { if (buffer == null) { buffer = newBuffer(tags) statBuffer.updateOutput(buffer.values.length) if (cf == ConsolidationFunction.Avg && multiple > 1) { valueMask = new JBitSet(buffer.values.length * multiple) valueMultiple = multiple } } statBuffer.updateInput(blocks) val op = aggr match { case Block.Sum => Math.addNaN _ case Block.Count => Math.addNaN _ case Block.Min => Math.minNaN _ case Block.Max => Math.maxNaN _ } blocks.foreach { b => if (valueMask != null) { val v = buffer.aggrBlock(tags, b, aggr, ConsolidationFunction.Sum, multiple, op) buffer.valueMask(valueMask, b, multiple) valueCount += v } else { val v = buffer.aggrBlock(tags, b, aggr, cf, multiple, op) valueCount += v } } buffer.values.length } def result: List[TimeSeriesBuffer] = { if (valueMask != null) { buffer.average(valueMask, valueMultiple) valueMask = null } if (buffer == null || valueCount == 0) Nil else List(buffer) } def stats: CollectorStats = statBuffer.result } /** Collector that returns a single buffer representing the sum of all individual buffers. */ class SumAggregateCollector extends SimpleAggregateCollector { protected def aggregate(b1: TimeSeriesBuffer, b2: TimeSeriesBuffer) = b1.add(b2) } /** Collector that returns a single buffer representing the min of all individual buffers. */ class MinAggregateCollector extends SimpleAggregateCollector { protected def aggregate(b1: TimeSeriesBuffer, b2: TimeSeriesBuffer) = b1.min(b2) } /** Collector that returns a single buffer representing the max of all individual buffers. 
*/ class MaxAggregateCollector extends SimpleAggregateCollector { protected def aggregate(b1: TimeSeriesBuffer, b2: TimeSeriesBuffer) = b1.max(b2) } abstract class LimitedAggregateCollector extends AggregateCollector { protected def checkLimits(numLines: Int, numDatapoints: Int) { check("lines", numLines, Limits.maxLines) check("datapoints", numDatapoints, Limits.maxDatapoints) } private def check(what: String, actual: Int, limit: Int) { require(actual <= limit, s"too many $what: $actual > $limit") } } class GroupByAggregateCollector(ft: DataExpr.GroupBy) extends LimitedAggregateCollector { type KeyType = String private val buffers = collection.mutable.HashMap.empty[KeyType, AggregateCollector] private var bufferSize = -1 def add(b: TimeSeriesBuffer) { // Create key and exit early on failure val k = ft.keyString(b.tags) if (k == null) return // Add the data to the existing collector for the key or create a new one val c = buffers.get(k).getOrElse { checkLimits(buffers.size + 1, (buffers.size + 1) * b.values.length) val collector = newCollector(ft.af) buffers += (k -> collector) collector } c.add(b) } def add( tags: Map[String, String], blocks: List[Block], aggr: Int, cf: ConsolidationFunction, multiple: Int, newBuffer: Map[String, String] => TimeSeriesBuffer): Int = { // Create key and exit early on failure val k = ft.keyString(tags) if (k == null) return 0 // Add the data to the existing collector for the key or create a new one val c = buffers.get(k).getOrElse { if (bufferSize > 0) { checkLimits(buffers.size + 1, (buffers.size + 1) * bufferSize) } val collector = newCollector(ft.af) buffers += (k -> collector) collector } bufferSize = c.add(tags, blocks, aggr, cf, multiple, newBuffer) bufferSize } def result: List[TimeSeriesBuffer] = buffers.values.flatMap(_.result).toList def stats: CollectorStats = { val statBuffer = new CollectorStatsBuilder buffers.values.foreach(c => statBuffer.update(c.stats)) statBuffer.result } } class AllAggregateCollector extends LimitedAggregateCollector { private val builder = List.newBuilder[TimeSeriesBuffer] private var numLines = 0 val statBuffer = new CollectorStatsBuilder def add(b: TimeSeriesBuffer) { numLines += 1 checkLimits(numLines, numLines * b.values.length) statBuffer.updateInput(b.values.length) statBuffer.updateOutput(b.values.length) builder += b.copy } def add( tags: Map[String, String], blocks: List[Block], aggr: Int, cf: ConsolidationFunction, multiple: Int, newBuffer: Map[String, String] => TimeSeriesBuffer): Int = { var valueCount = 0 val buffer = newBuffer(tags) val op = aggr match { case Block.Sum => Math.addNaN _ case Block.Count => Math.addNaN _ case Block.Min => Math.minNaN _ case Block.Max => Math.maxNaN _ } blocks.foreach { b => val v = buffer.aggrBlock(tags, b, aggr, cf, multiple, op) valueCount += v } if (valueCount > 0) add(buffer) buffer.values.length } def result: List[TimeSeriesBuffer] = builder.result def stats: CollectorStats = statBuffer.result }
jasimmk/atlas
atlas-core/src/main/scala/com/netflix/atlas/core/db/AggregateCollector.scala
Scala
apache-2.0
8,271
package firesoft.util

object Timer extends App {

  def timePrint[R](f: => R): Unit = {
    println(timeString(f))
  }

  /** Time taken to evaluate `f`, in microseconds. */
  def time[R](f: => R): Long = {
    val t0 = System.nanoTime()
    val r = f
    val t1 = System.nanoTime()
    (t1 - t0) / 1000
  }

  /** Formats the elapsed time in milliseconds together with the result of `f`. */
  def timeString[R](f: => R): String = {
    val t0 = System.nanoTime()
    val r = f
    val t1 = System.nanoTime()
    val nanos = t1 - t0
    val ms = nanos / 1000000d
    val st = ms.toString.slice(0, 5)
    s"time taken $st ms \nThe result is: $r"
  }

  // FileWriter here is the project's own helper, not java.io.FileWriter.
  def time2file[R](f: => R)(path: String): Unit = {
    FileWriter.write(timeString(f), path)
  }

  /** Average time over `times` runs, reported in milliseconds. */
  def average[R](times: Int = 10)(f: => R): Unit = {
    var result = 0L
    var n = times
    while (n > 0) {
      result = result + time(f)
      n = n - 1
    }
    // average time taken
    val average = result / (times * 1000d)
    println(s"time taken: $average ms ")
  }
}
firephil/scala-math-problems
src/firesoft/util/Timer.scala
Scala
mit
968
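A quick usage sketch of the Timer helpers above; timePrint and average are taken from the object as written, and the summation workload is an arbitrary stand-in:

import firesoft.util.Timer

object TimerExample extends App {
  // Print the elapsed time and the result of the block.
  Timer.timePrint {
    (1L to 1000000L).sum
  }

  // Run the block 5 times and print the mean time in milliseconds.
  Timer.average(5) {
    (1L to 1000000L).sum
  }
}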
import scala.collection.mutable.PriorityQueue import org.scalacheck._ import Prop._ import Arbitrary._ object Test extends Properties("PriorityQueue") { type E = Int // the element type used for most/all of the tests def checkInvariant[A](pq: PriorityQueue[A])(implicit ord: Ordering[A]): Boolean = { // The ordering invariant in the heap is that parent >= child. // A child at index i has a parent at index i/2 in the priority // queue's internal array. However, that array is padded with // an extra slot in front so that the first real element is at // index 1. The vector below is not padded, so subtract 1 from // every index. import ord._ val vec = pq.toVector // elements in same order as pq's internal array 2 until pq.size forall { i => vec(i/2-1) >= vec(i-1) } } property("newBuilder (in companion)") = forAll { list: List[E] => val builder = PriorityQueue.newBuilder[E] for (x <- list) builder += x val pq = builder.result() checkInvariant(pq) && pq.dequeueAll == list.sorted.reverse } property("to[PriorityQueue]") = forAll { list: List[E] => val pq = list.to[PriorityQueue] checkInvariant(pq) && pq.dequeueAll == list.sorted.reverse } property("apply (in companion)") = forAll { list: List[E] => val pq = PriorityQueue.apply(list : _*) checkInvariant(pq) && pq.dequeueAll == list.sorted.reverse } property("size, isEmpty") = forAll { list: List[E] => val pq = PriorityQueue(list : _*) pq.size == list.size && pq.isEmpty == list.isEmpty } property("+=") = forAll { (x: E, list: List[E]) => val pq = PriorityQueue(list : _*) pq += x checkInvariant(pq) && pq.dequeueAll == (x :: list).sorted.reverse } property("++= on empty") = forAll { list: List[E] => val pq = PriorityQueue.empty[E] pq ++= list checkInvariant(pq) && pq.dequeueAll == list.sorted.reverse } property("++=") = forAll { (list1: List[E], list2: List[E]) => val pq = PriorityQueue(list1 : _*) pq ++= list2 checkInvariant(pq) && pq.dequeueAll == (list1 ++ list2).sorted.reverse } property("reverse") = forAll { list: List[E] => val pq = PriorityQueue(list : _*).reverse checkInvariant(pq)(implicitly[Ordering[E]].reverse) && pq.dequeueAll == list.sorted } property("reverse then ++=") = forAll { list: List[E] => val pq = PriorityQueue.empty[E].reverse ++= list checkInvariant(pq)(implicitly[Ordering[E]].reverse) && pq.dequeueAll == list.sorted } property("reverse then +=") = forAll { (x: E, list: List[E]) => val pq = PriorityQueue(list : _*).reverse += x checkInvariant(pq)(implicitly[Ordering[E]].reverse) && pq.dequeueAll == (x +: list).sorted } property("clone") = forAll { list: List[E] => val pq = PriorityQueue(list : _*) val c = pq.clone() (pq ne c) && checkInvariant(c) && c.dequeueAll == pq.dequeueAll } property("dequeue") = forAll { list: List[E] => list.nonEmpty ==> { val pq = PriorityQueue(list : _*) val x = pq.dequeue() checkInvariant(pq) && x == list.max && pq.dequeueAll == list.sorted.reverse.tail } } }
felixmulder/scala
test/files/scalacheck/MutablePriorityQueue.scala
Scala
bsd-3-clause
3,196
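The properties above all boil down to the same observable behaviour: dequeueAll returns elements in descending order, and the internal array satisfies the parent >= child heap invariant that checkInvariant verifies. A tiny standalone sketch of what the generators are checking (the sample numbers are arbitrary):

import scala.collection.mutable.PriorityQueue

object PriorityQueueExample extends App {
  val list = List(3, 1, 4, 1, 5, 9, 2, 6)
  val pq = PriorityQueue(list: _*)

  // Max element first, i.e. the reverse of the sorted list,
  // which is exactly what the ScalaCheck properties assert.
  println(pq.dequeueAll)       // 9, 6, 5, 4, 3, 2, 1, 1
  println(list.sorted.reverse) // same sequence
}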
package s99

import org.scalatest.{FunSpec, Matchers}

class P07Spec extends FunSpec with Matchers {

  describe("flatten(List)") {
    it("(**) Flatten a nested list structure.") {
      P07.flatten(List(List(1, 1), 2, List(3, List(5, 8)))) should equal (List(1, 1, 2, 3, 5, 8))
      P07.flatten(List.empty) should equal (List.empty)
    }
  }

  describe("flatten2(List)") {
    it("(**) Flatten a nested list structure.") {
      P07.flatten2(List(List(1, 1), 2, List(3, List(5, 8)))) should equal (List(1, 1, 2, 3, 5, 8))
      P07.flatten2(List.empty) should equal (List.empty)
    }
  }
}
qilab-/algorithm-problems
s-99/src/test/scala/s99/P07Spec.scala
Scala
unlicense
597
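The spec above exercises P07.flatten and P07.flatten2 without showing them. A plausible implementation that satisfies these expectations (the actual solution in the repo may differ) is:

package s99

object P07 {
  // Recursive flatMap: nested lists are flattened recursively, other elements kept as-is.
  def flatten(xs: List[Any]): List[Any] = xs flatMap {
    case nested: List[_] => flatten(nested)
    case other           => List(other)
  }

  // Same idea written with foldRight instead of flatMap.
  def flatten2(xs: List[Any]): List[Any] =
    xs.foldRight(List.empty[Any]) {
      case (nested: List[_], acc) => flatten2(nested) ::: acc
      case (other, acc)           => other :: acc
    }
}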
/*********************************************************************** * Copyright (c) 2013-2015 Commonwealth Computer Research, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Apache License, Version 2.0 which * accompanies this distribution and is available at * http://www.opensource.org/licenses/apache2.0.php. *************************************************************************/ package org.locationtech.geomesa.accumulo.index import org.locationtech.geomesa.utils.stats.Cardinality import org.locationtech.geomesa.utils.stats.Cardinality.Cardinality import org.opengis.feature.`type`.AttributeDescriptor import org.opengis.feature.simple.SimpleFeatureType /** * Provides hints for a simple feature type */ trait StrategyHintsProvider { /** * Get a hint implementation based on the feature type * * @param sft * @return */ def strategyHints(sft: SimpleFeatureType): StrategyHints } /** * Provides hints for determining query strategies */ trait StrategyHints { /** * Returns the cardinality for an attribute * * @param ad * @return */ def cardinality(ad: AttributeDescriptor): Cardinality } /** * Implementation of hints that uses user data stored in the attribute descriptor */ class UserDataStrategyHints extends StrategyHints { import org.locationtech.geomesa.utils.geotools.RichAttributeDescriptors.RichAttributeDescriptor override def cardinality(ad: AttributeDescriptor) = ad.getCardinality() } /** * No-op implementation of hints, for when you don't care about costs. */ object NoOpHints extends StrategyHints { override def cardinality(ad: AttributeDescriptor) = Cardinality.UNKNOWN }
vpipkt/geomesa
geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/index/StrategyHints.scala
Scala
apache-2.0
1,736
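A sketch of a custom hints implementation built on the traits above. It assumes Cardinality.HIGH exists alongside the UNKNOWN value used by NoOpHints (if not, substitute any other value of the enumeration); the class names are invented for illustration:

import org.locationtech.geomesa.accumulo.index.{StrategyHints, StrategyHintsProvider}
import org.locationtech.geomesa.utils.stats.Cardinality
import org.opengis.feature.`type`.AttributeDescriptor
import org.opengis.feature.simple.SimpleFeatureType

/** Treats every attribute as high-cardinality. */
object HighCardinalityHints extends StrategyHints {
  override def cardinality(ad: AttributeDescriptor) = Cardinality.HIGH
}

/** Provider that always hands back the hints object above, regardless of the feature type. */
class HighCardinalityHintsProvider extends StrategyHintsProvider {
  override def strategyHints(sft: SimpleFeatureType): StrategyHints = HighCardinalityHints
}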
package coursier.test

import utest.runner.Framework

class CustomFramework extends Framework {

  override def setup(): Unit =
    coursier.cache.CacheUrl.setupProxyAuth(Map(
      ("http", "localhost", "9083") -> ("jack", "insecure"),
      ("http", "localhost", "9084") -> ("wrong", "nope")
    ))
}
alexarchambault/coursier
modules/proxy-tests/src/it/scala/coursier/test/CustomFramework.scala
Scala
apache-2.0
304
package org.backuity.clist.parse import org.backuity.clist._ import org.junit.Test class ArgumentTest extends ClistTestBase { import ArgumentTest._ @Test def parseNamedArg(): Unit = { Cli.parse(Array("--the-target=stuff")).throwExceptionOnError().withCommand(new Run) { run => run.target must_== "stuff" } } @Test def parseNamedArgAtTheEnd(): Unit = { Cli.parse(Array("--1", "--opt2=hehe", "--target=stuff")).throwExceptionOnError().withCommand(new RunWithOption) { run => run.target must_== "stuff" run.opt1 must beTrue run.opt2 must_== "hehe" } } @Test def parseMultiArgAttributes(): Unit = { Cli.parse(Array("one", "two", "3")).withCommand(new MultiArgAttributes) { cmd => cmd.argOne must_== "one" cmd.arg2 must_== "two" cmd.other must_== 3 } } @Test def parseSameArgumentMultipleTimes(): Unit = { Cli.parse(Array("same", "same", "3")).withCommand(new MultiArgAttributes) { cmd => cmd.argOne must_== "same" cmd.arg2 must_== "same" cmd.other must_== 3 } } } object ArgumentTest { class Run extends Command { var target = arg[String](name = "the-target") } class RunWithOption extends Command { var target = arg[String]() var opt1 = opt[Boolean](name = "1") var opt2 = opt[String](default = "haha") } class MultiArgAttributes extends Command { var argOne = arg[String]() var arg2 = arg[String]() var other = arg[Int]() } }
backuity/clist
tests/src/test/scala/org/backuity/clist/parse/ArgumentTest.scala
Scala
apache-2.0
1,491
package com.twitter.searchbird import com.twitter.finagle.Service import com.twitter.finagle.http.path._ import com.twitter.finagle.http.service.RoutingService import com.twitter.finagle.http.{Request, Response} import com.twitter.searchbird.thrift.{SearchbirdException, SearchbirdService} import com.twitter.server.util.JsonConverter import com.twitter.util.Future import org.jboss.netty.handler.codec.http.{HttpMethod, HttpResponseStatus} object RestfulService { case class KeyValue(key: String, value: String) case class SearchResults(query: String, results: Seq[String]) def apply(service: SearchbirdService[Future]): Service[Request, Response] = RoutingService.byMethodAndPathObject { case (HttpMethod.GET, Root / "api" / "kv" / key) => new Service[Request, Response] { override def apply(request: Request): Future[Response] = { service.get(key).map { value => request.response.setStatus(HttpResponseStatus.OK) request.response.setContentTypeJson() request.response.setContentString(JsonConverter.writeToString(KeyValue(key, value))) request.response }.handle { case SearchbirdException(msg) => request.response.setStatus(HttpResponseStatus.NOT_FOUND) request.response.setContentString(msg) request.response case e : Throwable => request.response.setStatus(HttpResponseStatus.INTERNAL_SERVER_ERROR) request.response.setContentString(e.toString) request.response } } } case (HttpMethod.PUT, Root / "api" / "kv" / key) => new Service[Request, Response] { override def apply(request: Request): Future[Response] = { Option(request.getParam("value")) match { case Some(value) => service.put(key, value).map { _ => request.response.setStatus(HttpResponseStatus.OK) request.response }.handle { case e : Throwable => request.response.setStatus(HttpResponseStatus.INTERNAL_SERVER_ERROR) request.response.setContentString(e.toString) request.response } case None => request.response.setStatus(HttpResponseStatus.BAD_REQUEST) request.response.setContentString("Missing param 'value'") Future.value(request.response) } } } case (HttpMethod.GET, Root / "api" / "search" / query) => new Service[Request, Response] { override def apply(request: Request): Future[Response] = { service.search(query).map { results => request.response.setStatus(HttpResponseStatus.OK) request.response.setContentTypeJson() request.response.setContentString(JsonConverter.writeToString(SearchResults(query, results))) request.response }.handle { case e : Throwable => request.response.setStatus(HttpResponseStatus.INTERNAL_SERVER_ERROR) request.response.setContentString(e.toString) request.response } } } } }
maufee/Searchbird
src/main/scala/com/twitter/searchbird/RestfulService.scala
Scala
apache-2.0
3,115
package drawables.objects

import drawables.Drawable
import java.awt.Graphics2D

class Cursor extends Drawable {
  def draw(g: Graphics2D): Unit = {
    //TODO
  }
}
jamesmuerle/Bones
src/drawables/objects/Cursor.scala
Scala
mit
165
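The draw method above is still a TODO. A hypothetical way it might be filled in, using only standard Graphics2D calls; the position, size, and colour are invented for illustration and are not taken from the Bones project:

import java.awt.{Color, Graphics2D}

// Hypothetical variant of the Cursor above that actually renders something.
class FilledCursor(x: Int, y: Int) extends drawables.Drawable {
  private val width = 2
  private val height = 16

  def draw(g: Graphics2D): Unit = {
    // Draw a thin vertical bar, the classic text-cursor shape.
    g.setColor(Color.BLACK)
    g.fillRect(x, y, width, height)
  }
}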
package com.twitter.gizzard.thrift.conversions import scala.collection.{JavaConversions => JC} import java.nio.{BufferUnderflowException, ByteBuffer, ByteOrder} import java.util.{AbstractList => JAbstractList, List => JList} import com.twitter.gizzard.util.{Future, ParallelSeq} object Sequences { class RichSeq[A <: AnyRef](seq: Seq[A]) { def parallel(future: Future) = new ParallelSeq(seq, future) @deprecated("rely on implicit conversion from scala.collection.JavaConversions._") def toJavaList: JList[A] = JC.asJavaList(seq) def double = for (i <- seq) yield (i, i) } implicit def seqToRichSeq[A <: AnyRef](seq: Seq[A]) = new RichSeq(seq) class RichIntSeq(seq: Seq[Int]) { def parallel(future: Future) = new ParallelSeq(seq, future) @deprecated("there is implicit conversion from Seq[Int] to java.util.List[java.lang.Integer]") def toJavaList: JList[java.lang.Integer] = intSeqToBoxedJavaList(seq) def double = for (i <- seq) yield (i, i) def pack: ByteBuffer = { val buffer = new Array[Byte](seq.size * 4) val byteBuffer = ByteBuffer.wrap(buffer) byteBuffer.order(ByteOrder.LITTLE_ENDIAN) seq.foreach { item => byteBuffer.putInt(item) } byteBuffer.rewind byteBuffer } } implicit def seqToRichIntSeq(seq: Seq[Int]) = new RichIntSeq(seq) implicit def intSeqToBoxedJavaList(seq: Seq[Int]) = { JC.asJavaList(seq.map(_.asInstanceOf[java.lang.Integer])) } implicit def boxedJavaListToIntSeq(list: JList[java.lang.Integer]) = { JC.asScalaIterable(list).toSeq.map(_.asInstanceOf[Int]) } class RichLongSeq(seq: Seq[Long]) { def parallel(future: Future) = new ParallelSeq(seq, future) @deprecated("there is implicit conversion from Seq[Long] to java.util.List[java.lang.Long]") def toJavaList: JList[java.lang.Long] = longSeqToBoxedJavaList(seq) def double = for (i <- seq) yield (i, i) def pack: ByteBuffer = { val buffer = new Array[Byte](seq.size * 8) val byteBuffer = ByteBuffer.wrap(buffer) byteBuffer.order(ByteOrder.LITTLE_ENDIAN) seq.foreach { item => byteBuffer.putLong(item) } byteBuffer.rewind byteBuffer } } implicit def seqToRichLongSeq(seq: Seq[Long]) = new RichLongSeq(seq) implicit def longSeqToBoxedJavaList(seq: Seq[Long]) = { JC.asJavaList(seq.map(_.asInstanceOf[java.lang.Long])) } implicit def boxedJavaListToLongSeq(list: JList[java.lang.Long]) = { JC.asScalaIterable(list).toSeq.map(_.asInstanceOf[Long]) } class RichByteBuffer(buffer: ByteBuffer) { def toIntArray = { buffer.order(ByteOrder.LITTLE_ENDIAN) val ints = buffer.asIntBuffer val results = new Array[Int](ints.limit) ints.get(results) results } def toLongArray = { buffer.order(ByteOrder.LITTLE_ENDIAN) val longs = buffer.asLongBuffer val results = new Array[Long](longs.limit) longs.get(results) results } } implicit def bufferToRichByteBuffer(buffer: ByteBuffer) = new RichByteBuffer(buffer) }
kangkot/gizzard
src/main/scala/com/twitter/gizzard/thrift/conversions/Sequences.scala
Scala
apache-2.0
3,066
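A small round-trip through the pack and toIntArray helpers defined in the entry above; the object name and the sample values are illustrative only.

import java.nio.ByteBuffer
import com.twitter.gizzard.thrift.conversions.Sequences._

object SequencesRoundTrip {
  def main(args: Array[String]): Unit = {
    val ids = Seq(1, 2, 3)

    // RichIntSeq.pack writes the ints little-endian into a rewound ByteBuffer.
    val buffer: ByteBuffer = ids.pack

    // RichByteBuffer.toIntArray reads them back out in the same order.
    val restored = buffer.toIntArray.toSeq
    assert(restored == ids)

    // The implicit boxing conversion turns a Seq[Int] into a java.util.List[java.lang.Integer].
    val javaList: java.util.List[java.lang.Integer] = ids
    println(javaList)
  }
}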
package pt.tecnico.dsi.akkastrator import scala.concurrent.duration.{Duration, DurationInt} import scala.util.Random import akka.actor.{Actor, ActorRef, Props} import akka.testkit.TestActor.AutoPilot import akka.testkit.TestProbe import pt.tecnico.dsi.akkastrator.ActorSysSpec._ import pt.tecnico.dsi.akkastrator.DSL.{FullTask, TaskBundle, TaskSpawnOrchestrator} import pt.tecnico.dsi.akkastrator.Step6_TaskBundleSpec._ import pt.tecnico.dsi.akkastrator.Task._ import shapeless.{::, HNil} object Step6_TaskBundleSpec { class DummyActor extends Actor { def receive: Receive = Actor.emptyBehavior } class InvalidTaskSpawnOrchestrator(destinations: Array[TestProbe]) extends ControllableOrchestrator(destinations) { FullTask("A") createTask { _ => // We are stating that TaskSpawnOrchestrator will create a Bundle[Int] orchestrator // which satisfies the type constraints of the TaskSpawnOrchestrator, but then we provide // it with a Props class that creates a different actor which does not satisfies the type constraints. // This test serves to catch this error. TaskSpawnOrchestrator[Seq[Int], Bundle[Int]](Props[DummyActor]) } } val startingFruits = Seq("Farfalhi", "Kunami", "Funini", "Katuki", "MaracatΓ©") // N*B class SingleTaskBundle(destinations: Array[TestProbe]) extends ControllableOrchestrator(destinations) { // In practice an orchestrator with a single TaskBundle like this one is useless. // We created it because it serves us as a sort of incremental test ramping up to a "complex" orchestrator. // If this test fails then there is a problem that is inherent to task bundles and not to some sort of // interplay between some other akkastrator abstraction. FullTask("A") createTask { _ => TaskBundle( startingFruits.zipWithIndex.map { case (fruit, i) => task(destinationIndex = i, result = fruit.length) } ) } } // A -> N*B class TaskBundleDependency(destinations: Array[TestProbe]) extends ControllableOrchestrator(destinations) { val a = simpleMessageFulltask("A", 0, startingFruits) val b = FullTask("B", a) createTaskWith { case fruits :: HNil => TaskBundle( fruits.zipWithIndex.map { case (fruit, i) => task(destinationIndex = i + 1, result = fruit.length) } ) } } // N*B // A β†’βŸ¨ βŸ©β†’ 2N*D // N*C class ComplexTaskBundle(destinations: Array[TestProbe]) extends ControllableOrchestrator(destinations) { val a = simpleMessageFulltask("A", 0, startingFruits) val b = FullTask("B", a) createTaskWith { case fruits :: HNil => TaskBundle( fruits map { fruit => task(destinationIndex = 1, result = fruit) } ) } val c = FullTask("C", a :: HNil, Duration.Inf) createTaskWith { case fruits :: HNil => TaskBundle( fruits.map { fruit => task(destinationIndex = 2, result = fruit) } ) } val d = FullTask("D", (b, c)) createTask { case (fruitsB, fruitsC) => TaskBundle( (fruitsB ++ fruitsC) map { fruit => task(destinationIndex = 3, result = fruit) } ) } } class ComplexBFirstTaskBundle(destinations: Array[TestProbe]) extends ComplexTaskBundle(destinations) class ComplexCFirstTaskBundle(destinations: Array[TestProbe]) extends ComplexTaskBundle(destinations) // A -> N*B (one of B aborts) class InnerTaskAbortingTaskBundle(destinations: Array[TestProbe]) extends ControllableOrchestrator(destinations) { val a = simpleMessageFulltask("A", 0, startingFruits) val abortingTask = Random.nextInt(5) FullTask("B", a) createTaskWith { case fruits :: HNil => TaskBundle( fruits.zipWithIndex.map { case (fruit, i) => task(destinationIndex = i + 1, result = fruit, abortOnReceive = i == abortingTask) } ) } } // N*A (A timeouts) class 
OuterTaskAbortingTaskBundle(destinations: Array[TestProbe]) extends ControllableOrchestrator(destinations) { FullTask("A", timeout = 1.millis) createTaskWith { _ => TaskBundle ( task(destinationIndex = 0), task(destinationIndex = 1), task(destinationIndex = 2) ) } } } class Step6_TaskBundleSpec extends ActorSysSpec { /* "A TaskSpwanOrchestrator" should { "throw an exception" when { "the Props pass does not match the type arguments specified" in { val testCase = new TestCase[InvalidTaskSpawnOrchestrator](numberOfDestinations = 0, Set("A")) { val transformations = Seq(startTransformation) } import testCase._ testCase.differentTestPerState( { testStatus(_) }, // 1st state: startingTasks -> Unstarted. // StartOrchestrator is sent { secondState => () } ) } } } */ "An orchestrator with task bundles" should { "execute the tasks of the inner orchestrator" when { //N*A "there's a single bundle" in { val testCase = new TestCase[SingleTaskBundle](numberOfDestinations = 5, startingTasksIndexes = Set(0)) { val transformations = withStartAndFinishTransformations( { secondState => // In parallel why not startingFruits.indices.par.foreach { i => pingPong(destinations(i)) } secondState.updatedStatuses( 0 -> Waiting or Finished(startingFruits.map(_.length)) ) }, { thirdState => // By this time some of the inner tasks of A might have already finished (we don't know which, if any). // The ones that have finished will not send a message to their destination, // however the ones that are still waiting will. // If we don't pingPong for the waiting ones the entire test will fail since // the inner orchestrator will never terminate. // If we try to pingPong for the finished ones the expectMsg will timeout and throw an exception // causing the test to erroneously fail. // To get out of this pickle we pingPong every destination but ignore any timeout error. startingFruits.indices.par.foreach { i => pingPong(destinations(i), ignoreTimeoutError = true) } expectInnerOrchestratorTermination(0) thirdState.updatedStatuses( 0 -> Finished(startingFruits.map(_.length)) ) } ) } testCase.testExpectedStatusWithRecovery() } //A -> N*B "there's a single bundle with a dependency" in { val testCase = new TestCase[TaskBundleDependency](numberOfDestinations = 6, Set(0)) { val transformations = withStartAndFinishTransformations( { secondState => pingPong(destinations(0)) // Destination of Task "A" secondState.updatedStatuses( 0 -> Finished(startingFruits), 1 -> Unstarted or Waiting ) }, { thirdState => // Destinations of B tasks startingFruits.indices.par.foreach { i => // See the first test in this suite to understand why the timeout error is being ignored pingPong(destinations(i + 1), ignoreTimeoutError = true) } expectInnerOrchestratorTermination(1) thirdState.updatedStatuses( 1 -> Finished(startingFruits.map(_.length)) ) }, { fourthState => // Note that even with the orchestrator crashing the inner orchestrator won't run again. // This is consistent with the orchestrator recovering since the task B (the task bundle) will // recover the TaskFinished, thus it will never send the SpawnAndStart message. // Which in turn means the inner orchestrator will never be created. 
startingFruits.indices.par.foreach { i => destinations(i + 1).expectNoMessage() } fourthState } ) } testCase.testExpectedStatusWithRecovery() } // N*B // A β†’βŸ¨ βŸ©β†’ 2*N*D // N*C "there are three bundles: B and C handled at same time" in { val testCase = new TestCase[ComplexTaskBundle](numberOfDestinations = 4, Set(0)) { val transformations = withStartAndFinishTransformations( { secondState => pingPong(destinations(0)) // Destination of Task "A" secondState.updatedStatuses( 0 -> Finished(startingFruits), 1 -> Unstarted or Waiting, 2 -> Unstarted or Waiting ) }, { thirdState => startingFruits.par.foreach { _ => // See the first test in this suite to understand why the timeout error is being ignored pingPong(destinations(1), ignoreTimeoutError = true) // Destinations of B tasks pingPong(destinations(2), ignoreTimeoutError = true) // Destinations of C tasks } expectInnerOrchestratorTermination(2) expectInnerOrchestratorTermination(1) thirdState.updatedStatuses( 1 -> Finished(startingFruits), 2 -> Finished(startingFruits), 3 -> Unstarted or Waiting ) }, { fourthState => (0 until startingFruits.length * 2).par.foreach { _ => pingPong(destinations(3)) // Destinations of D tasks } expectInnerOrchestratorTermination(3) fourthState.updatedStatuses( 3 -> Finished(startingFruits ++ startingFruits) ) } ) } testCase.testExpectedStatusWithRecovery() } "there are three bundles: B handled then C handled" in { val testCase = new TestCase[ComplexBFirstTaskBundle](numberOfDestinations = 4, Set(0)) { val transformations = withStartAndFinishTransformations( { secondState => pingPong(destinations(0)) // Destination of Task "A" secondState.updatedStatuses( 0 -> Finished(startingFruits), 1 -> Unstarted or Waiting, 2 -> Unstarted or Waiting ) }, { thirdState => // B tasks startingFruits.par.foreach { _ => pingPong(destinations(1)) } expectInnerOrchestratorTermination(1) // C tasks startingFruits.par.foreach { _ => pingPong(destinations(2)) } thirdState.updatedStatuses( 1 -> Finished(startingFruits), 2 -> Waiting, 3 -> Unstarted ) }, { fourthState => // C tasks startingFruits.par.foreach { _ => pingPong(destinations(2), ignoreTimeoutError = true) } expectInnerOrchestratorTermination(2) // This proves the tasks of bundle B won't send any messages to their destination. // This will happen because B will recover and never trigger the creation of the inner orchestrator. startingFruits.par.foreach { _ => destinations(1).expectNoMessage() } destinations(3).setAutoPilot(new AutoPilot { override def run(sender: ActorRef, msg: Any) = msg match { case s: SimpleMessage => sender ! 
s keepRunning } }) fourthState.updatedStatuses( 2 -> Finished(startingFruits), 3 -> Waiting ) }, { fifthState => expectInnerOrchestratorTermination(3) fifthState.updatedStatuses( 3 -> Finished(startingFruits ++ startingFruits) ) } ) } testCase.testExpectedStatusWithRecovery() } "there are three bundles: C handled then B handled" in { val testCase = new TestCase[ComplexCFirstTaskBundle](numberOfDestinations = 4, Set(0)) { val transformations = withStartAndFinishTransformations( { secondState => pingPong(destinations(0)) // Destination of Task "A" secondState.updatedStatuses( 0 -> Finished(startingFruits), 1 -> Unstarted or Waiting, 2 -> Unstarted or Waiting ) }, { thirdState => // C tasks startingFruits.par.foreach { _ => pingPong(destinations(2)) } expectInnerOrchestratorTermination(2) // B tasks startingFruits.par.foreach { _ => pingPong(destinations(1)) } thirdState.updatedStatuses( 1 -> Waiting, 2 -> Finished(startingFruits), 3 -> Unstarted ) }, { fourthState => // B tasks startingFruits.par.foreach { _ => pingPong(destinations(1), ignoreTimeoutError = true) } expectInnerOrchestratorTermination(2) // This proves the tasks of bundle C won't send any messages to their destination. // This will happen because C will recover and never send the message to the Spawner. startingFruits.par.foreach { _ => destinations(2).expectNoMessage() } destinations(3).setAutoPilot(new AutoPilot { override def run(sender: ActorRef, msg: Any) = msg match { case s: SimpleMessage => sender ! s keepRunning } }) fourthState.updatedStatuses( 1 -> Finished(startingFruits), 3 -> Waiting ) }, { fifthState => expectInnerOrchestratorTermination(3) fifthState.updatedStatuses( 3 -> Finished(startingFruits ++ startingFruits) ) } ) } testCase.testExpectedStatusWithRecovery() } } "should abort" when { // A -> N*B one of B aborts "when an inner task aborts" in { val testCase = new TestCase[InnerTaskAbortingTaskBundle](numberOfDestinations = 6, Set(0)) { val transformations = withStartAndFinishTransformations( { secondState => pingPong(destinations(0)) // Destination of Task "A" secondState.updatedStatuses( 0 -> Finished(startingFruits), 1 -> Unstarted or Waiting ) }, { thirdState => startingFruits.indices.par.foreach { i => pingPong(destinations(i + 1)) } expectInnerOrchestratorTermination(1) thirdState.updatedStatuses( 1 -> Aborted(testsAbortReason) ) }, { fourthState => parentProbe expectMsg OrchestratorAborted fourthState } ) } testCase.testExpectedStatusWithRecovery() } // N*A the bundle timeouts "the bundle timeouts" in { val testCase = new TestCase[OuterTaskAbortingTaskBundle](numberOfDestinations = 3, Set(0)) { val transformations = withStartAndFinishTransformations( { secondState => (0 until 3).par.foreach { i => // Just receive the messages without answering to ensure the outer task timeouts // We are ignoring the timeout. See the test "there's a single bundle" to understand why. pingPong(destinations(i), ignoreTimeoutError = true, pong = false) } import scala.concurrent.TimeoutException secondState.updatedStatuses( 0 -> Aborted(new TimeoutException()) ) }, { thirdState => // While recovering the bundle A will handle the MessageReceive. Or in other words its // spawner won't create the inner orchestrator and therefor the inner tasks will never send their messages. (0 until 3).indices.par.foreach { i => destinations(i).expectNoMessage() } parentProbe expectMsg OrchestratorAborted thirdState } ) } testCase.testExpectedStatusWithRecovery() } } } }
ist-dsi/akkastrator
src/test/scala/pt/tecnico/dsi/akkastrator/Step6_TaskBundleSpec.scala
Scala
mit
17,374
/* * The MIT License (MIT) * * Copyright (c) 2016 Algolia * http://www.algolia.com/ * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ package algolia.definitions import algolia.http.{DELETE, GET, HttpPayload, POST} import algolia.inputs.{UserIDAssignment, UserIDsAssignment} import algolia.objects.RequestOptions import org.json4s.Formats import org.json4s.native.Serialization.write case class AssignUserIDDefinition( assignment: UserIDAssignment, requestOptions: Option[RequestOptions] = None )(implicit val formats: Formats) extends Definition { override type T = AssignUserIDDefinition override def options(requestOptions: RequestOptions): AssignUserIDDefinition = copy(requestOptions = Some(requestOptions)) override private[algolia] def build(): HttpPayload = { val extraHeaders = Map("X-Algolia-User-ID" -> assignment.userID) val newRequestOptions = requestOptions match { case Some(opts) => opts.addExtraHeaders(extraHeaders) case None => RequestOptions(extraHeaders = Some(extraHeaders)) } val body = Map( "cluster" -> assignment.cluster ) HttpPayload( POST, Seq("1", "clusters", "mapping"), body = Some(write(body)), isSearch = false, requestOptions = Some(newRequestOptions) ) } } case class AssignUserIDsDefinition( assignment: UserIDsAssignment, requestOptions: Option[RequestOptions] = None )(implicit val formats: Formats) extends Definition { override type T = AssignUserIDsDefinition override def options( requestOptions: RequestOptions ): AssignUserIDsDefinition = copy(requestOptions = Some(requestOptions)) override private[algolia] def build(): HttpPayload = { val body = Map( "cluster" -> assignment.cluster, "users" -> assignment.userIDs ) HttpPayload( POST, Seq("1", "clusters", "mapping", "batch"), body = Some(write(body)), isSearch = false, requestOptions = requestOptions ) } } case class GetTopUserIDDefinition(requestOptions: Option[RequestOptions] = None) extends Definition { override type T = GetTopUserIDDefinition override def options(requestOptions: RequestOptions): GetTopUserIDDefinition = copy(requestOptions = Some(requestOptions)) override private[algolia] def build(): HttpPayload = HttpPayload( GET, Seq("1", "clusters", "mapping", "top"), isSearch = true, requestOptions = requestOptions ) } case class GetUserIDDefinition( userID: String, requestOptions: Option[RequestOptions] = None ) extends Definition { override type T = GetUserIDDefinition override def options(requestOptions: RequestOptions): GetUserIDDefinition = copy(requestOptions = Some(requestOptions)) override private[algolia] def 
build(): HttpPayload = { HttpPayload( GET, Seq("1", "clusters", "mapping", userID), isSearch = true, requestOptions = requestOptions ) } } case class ListClustersDefinition(requestOptions: Option[RequestOptions] = None) extends Definition { override type T = ListClustersDefinition override def options(requestOptions: RequestOptions): ListClustersDefinition = copy(Some(requestOptions)) override private[algolia] def build(): HttpPayload = { HttpPayload( GET, Seq("1", "clusters"), isSearch = true, requestOptions = requestOptions ) } } case class ListUserIDsDefinition( page: Int = 0, hitsPerPage: Int = 20, requestOptions: Option[RequestOptions] = None )(implicit val formats: Formats) extends Definition { override type T = ListUserIDsDefinition override def options(requestOptions: RequestOptions): ListUserIDsDefinition = copy(requestOptions = Some(requestOptions)) def page(page: Int): ListUserIDsDefinition = copy(page = page) def hitsPerPage(hitsPerPage: Int): ListUserIDsDefinition = copy(hitsPerPage = hitsPerPage) override private[algolia] def build(): HttpPayload = { val body = Map( "page" -> page, "hitsPerPage" -> hitsPerPage ) HttpPayload( GET, Seq("1", "clusters", "mapping"), isSearch = true, body = Some(write(body)), requestOptions = requestOptions ) } } case class RemoveUserIDDefinition( userID: String, requestOptions: Option[RequestOptions] = None ) extends Definition { override type T = RemoveUserIDDefinition override def options(requestOptions: RequestOptions): RemoveUserIDDefinition = copy(requestOptions = Some(requestOptions)) override private[algolia] def build(): HttpPayload = { val extraHeaders = Map("X-Algolia-User-ID" -> userID) val newRequestOptions = requestOptions match { case Some(opts) => opts.addExtraHeaders(extraHeaders) case None => RequestOptions(extraHeaders = Some(extraHeaders)) } HttpPayload( DELETE, Seq("1", "clusters", "mapping"), isSearch = false, requestOptions = Some(newRequestOptions) ) } } case class HadPendingMappingsDefinition( pending: Boolean = false, requestOptions: Option[RequestOptions] = None )(implicit val formats: Formats) extends Definition { override type T = HadPendingMappingsDefinition override def options( requestOptions: RequestOptions ): HadPendingMappingsDefinition = copy(requestOptions = Some(requestOptions)) val queryParameters: Option[Map[String, String]] = Some( Map("getClusters" -> pending.toString) ) override private[algolia] def build(): HttpPayload = { HttpPayload( GET, Seq("1", "clusters", "mapping", "pending"), isSearch = false, queryParameters = queryParameters, requestOptions = requestOptions ) } } case class SearchUserIDDefinition( query: String, cluster: String = "", page: Int = 0, hitsPerPage: Int = 20, requestOptions: Option[RequestOptions] = None )(implicit val formats: Formats) extends Definition { override type T = SearchUserIDDefinition override def options(requestOptions: RequestOptions): SearchUserIDDefinition = copy(requestOptions = Some(requestOptions)) def cluster(cluster: String): SearchUserIDDefinition = copy(cluster = cluster) def page(page: Int): SearchUserIDDefinition = copy(page = page) def hitsPerPage(hitsPerPage: Int): SearchUserIDDefinition = copy(hitsPerPage = hitsPerPage) override private[algolia] def build(): HttpPayload = { val body = Map( "query" -> query, "cluster" -> cluster, "page" -> page, "hitsPerPage" -> hitsPerPage ) HttpPayload( POST, Seq("1", "clusters", "mapping", "search"), body = Some(write(body)), isSearch = true, requestOptions = requestOptions ) } }
algolia/algoliasearch-client-scala
src/main/scala/algolia/definitions/MCMDefinition.scala
Scala
mit
7,868
/*
 * Copyright 2001-2013 Artima, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.scalatest.fixture

import org.scalatest._
import SharedHelpers._

class TestDataFixtureSpec extends org.scalatest.FunSpec {

  describe("A TestDataFixture") {

    it("should pass the test data to each test") {
      val myConfigMap = ConfigMap("hello" -> "world", "salt" -> "pepper")

      class MySuite extends fixture.FunSuite with TestDataFixture {
        var testDataPassed = false
        test("something") { (td: TestData) =>
          if (td.configMap == myConfigMap && td.name == "something")
            testDataPassed = true
        }
      }

      val suite = new MySuite
      suite.run(None, Args(SilentReporter, Stopper.default, Filter(), myConfigMap, None, new Tracker, Set.empty))
      assert(suite.testDataPassed)
    }
  }
}
travisbrown/scalatest
src/test/scala/org/scalatest/fixture/TestDataFixtureSpec.scala
Scala
apache-2.0
1,357
package com.bdir.back.app.routes

import com.bdir.back.app.{BdirServlet, Readiness}
import org.scalatra.ScalatraServlet

trait RouteServlets {

  def routes: List[(String, ScalatraServlet)] = {
    List(
      "/" -> new BdirServlet,
      "/_health/liveness" -> new Readiness
    )
  }
}
ervitis/bdir-back
src/main/scala/com/bdir/back/app/routes/RouteServlets.scala
Scala
mit
288
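A hypothetical Scalatra bootstrap showing one way the routes list above might be mounted; this class is a sketch and is not part of the repository.

import javax.servlet.ServletContext
import org.scalatra.LifeCycle
import com.bdir.back.app.routes.RouteServlets

// Mounts every (path, servlet) pair produced by RouteServlets.routes.
class ScalatraBootstrap extends LifeCycle with RouteServlets {
  override def init(context: ServletContext): Unit = {
    routes.foreach { case (path, servlet) =>
      context.mount(servlet, path)
    }
  }
}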
package com.sksamuel.avro4s.json import java.util import org.apache.avro.Schema import org.codehaus.jackson.node.TextNode /** * Accepts a json string, and returns an Avro Schema that best matches the json string. * * Converts: * * - json arrays to avro arrays * - objects to records * - doubles to doubles * - ints/longs to longs * - booleans to booleans * - nulls to union(string,null) */ class JsonToAvroConverter(namespace: String, avroStringTypeIsString: Boolean = false, jsonNamingStrategy: JsonNamingStrategy = CamelCase) { import org.json4s._ import org.json4s.native.JsonMethods._ import scala.collection.JavaConverters._ def convert(name: String, str: String): Schema = { convert(name, parse(str).transformField { case JField(n, v) => val newName = toCamelCase(n, jsonNamingStrategy) (newName, v) }) } def convert(name: String, value: JValue): Schema = value match { case JArray(value) => Schema.createArray(convert(name, value.head)) case JBool(_) => Schema.create(Schema.Type.BOOLEAN) case JDecimal(_) => Schema.create(Schema.Type.DOUBLE) case JDouble(_) => Schema.create(Schema.Type.DOUBLE) case JInt(_) => Schema.create(Schema.Type.LONG) case JLong(_) => Schema.create(Schema.Type.LONG) case JNothing => Schema.create(Schema.Type.NULL) case JNull => Schema.createUnion(util.Arrays.asList(Schema.create(Schema.Type.NULL), createStringSchema)) case JString(_) => createStringSchema case JSet(value) => Schema.createArray(convert(name, value.head)) case JObject(values) => val record = Schema.createRecord(name, null, namespace, false) val doc: String = null val default: AnyRef = null val fields = values.map { case (name, value) => new Schema.Field(name, convert(name, value), doc, default) } record.setFields(fields.asJava) record } private def createStringSchema = { val schema = Schema.create(Schema.Type.STRING) if (avroStringTypeIsString) schema.addProp("avro.java.string", new TextNode("String")) schema } private def toCamelCase(s: String, from: JsonNamingStrategy): String = { def fromDelimited(sep: String, s: String): String = { val head :: tail = s.split(sep).toList head ++ tail.foldLeft("")((acc, word) => acc ++ word.capitalize) } def decapitalize(s: String): String = { if (s.nonEmpty) s.head.toLower + s.tail else s } from match { case CamelCase => s case PascalCase => decapitalize(s) case SnakeCase => fromDelimited("_", s) case LispCase => fromDelimited("-", s) } } }
YuvalItzchakov/avro4s
avro4s-json/src/main/scala/com/sksamuel/avro4s/json/JsonToAvroConverter.scala
Scala
mit
2,696
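A brief usage sketch for the JsonToAvroConverter entry above; the record name, namespace and JSON payload are made-up examples.

import com.sksamuel.avro4s.json.JsonToAvroConverter

object JsonToAvroExample {
  def main(args: Array[String]): Unit = {
    val json = """{ "name": "Ada", "age": 36, "tags": ["math", "computing"] }"""

    val converter = new JsonToAvroConverter(namespace = "com.example")
    val schema = converter.convert("Person", json)

    // Expect a record with a string field, a long field and an array-of-string field.
    println(schema.toString(true))
  }
}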
/*
 * Copyright (C) 2005, The OpenURP Software.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
package org.openurp.std.register.model

import org.beangle.data.orm.MappingModule

class DefaultMapping extends MappingModule {

  def binding(): Unit = {
    bind[Register].declare { e =>
      e.remark is length(50)
      e.operateBy is length(50)
      e.operateIp is length(100)
    }
  }
}
openurp/api
std/src/main/scala/org/openurp/std/register/model/mapping.scala
Scala
lgpl-3.0
1,016
package com.sjsu.bikeshare.web; import com.sjsu.bikeshare.domain._ import com.sjsu.bikeshare.service._ import org.springframework.web.bind.annotation.PathVariable import org.springframework.web.bind.annotation.RequestMapping import org.springframework.stereotype.Controller import org.springframework.http.{ ResponseEntity, HttpStatus } import org.springframework.web.bind.annotation._ import org.springframework.context.annotation.{ ComponentScan, Configuration } import org.springframework.boot.autoconfigure.EnableAutoConfiguration import java.io._ import javax.validation.Valid import javax.persistence.Entity import org.springframework.web.context.request._ import java.util.{ List, ArrayList } import com.mongodb.casbah.Imports._ import com.sjsu.bikeshare.service.UserRepository import org.springframework.ui.Model import org.springframework.validation.BindingResult import org.springframework.beans.factory.annotation.Autowired import com.twilio.sdk.resource.instance.Account import com.twilio.sdk.TwilioRestClient import com.twilio.sdk.TwilioRestException import com.twilio.sdk.resource.factory.MessageFactory import com.twilio.sdk.resource.instance.Message import org.apache.http.NameValuePair import org.apache.http.message.BasicNameValuePair import javax.servlet.http.HttpSession import java.util._ import java.lang._ @Controller @RequestMapping(value = Array("/api/v1/users")) class UserController { private var randomCode : String = "" @RequestMapping(method = Array(RequestMethod.POST)) @ResponseStatus(value = HttpStatus.CREATED) def createUser(@Valid @RequestBody user:User) = { UserRepository.populateUser(user) } @RequestMapping(value=Array("/signup"),method = Array(RequestMethod.GET)) def SignUpForm( model:Model) = { model.addAttribute("user", new User()) model.addAttribute("userLogin", new UserLogin()) "SignUp" } @RequestMapping(value=Array("/signupnow"),method = Array(RequestMethod.POST)) def SignUpSubmit(@Valid user:User,bindingResult:BindingResult,model:Model,userLogin:UserLogin) = { if (bindingResult.hasErrors()) { println("in here") "SignUp" } else { model.addAttribute("user",user) model.addAttribute("rcode",randomCode) userLogin.email=user.email userLogin.setName(user.getFirstName) model.addAttribute("userLogin",userLogin) println("randomcode: " +randomCode) println("user.getTwiliocode : " + user.getTwiliocode ) println("user.getname : " + user.getFirstName ) println("user.email : " + user.getEmail ) if ( randomCode == user.getTwiliocode.toString ){ createUser(user) "homepage" } else { println ("codes not matched") println("twiliocde: " +randomCode) println("user.getTwiliocode : " + user.getTwiliocode ) model.addAttribute("user", new User()) "SignUp" } } } @RequestMapping(value=Array("/sendcode"),method = Array(RequestMethod.POST),produces = Array("application/json")) def sendSMS(@RequestBody contactNo:User,model:Model) = { val AUTH_TOKEN = "b62e99e1cc3899f53f48e8a5f89d1628" val ACCOUNT_SID = "AC164368f1f5629e34ddb91d0378d9bd47" var PHONE_NUMBER = contactNo.getContactNo val client = new TwilioRestClient(ACCOUNT_SID, AUTH_TOKEN) val account = client.getAccount val messageFactory = account.getMessageFactory val params = new ArrayList[NameValuePair]() //val tempCode = 5678 val random = new Random() val tempCode = random.nextInt(9999) randomCode = tempCode.toString() params.add(new BasicNameValuePair("To", PHONE_NUMBER)) params.add(new BasicNameValuePair("From", "+13095175765")) params.add(new BasicNameValuePair("Body", "Congrats! 
Your code# is " +randomCode)) val sms = messageFactory.create(params) model.addAttribute("user", new User()) "SignUp" } @RequestMapping(value=Array("/userlogin"),method = Array(RequestMethod.GET)) def userLoginForm( model:Model) = { model.addAttribute("userLogin", new UserLogin()) "login"} @RequestMapping(value=Array("/userval"),method = Array(RequestMethod.POST)) def getUser(@ModelAttribute @Valid userLogin:UserLogin,bindingResult: BindingResult,model:Model) = { if (bindingResult.hasErrors()) { "login" } else { val userName=UserRepository.validateUser(userLogin) if (userName.equalsIgnoreCase("Invalid User or password")) { println("Not a success case,so returning to login page again") model.addAttribute("userLogin", new UserLogin()) "login" } else { userLogin.setName(userName) println(" userName "+userName) model.addAttribute("userLogin", userLogin) "homepage" } } } @RequestMapping(value=Array("/{email}/bike"),method = Array(RequestMethod.GET)) def getBikes(@PathVariable email:String) = { println (email) BikeRepository.getBikes(email) } @RequestMapping(value=Array("/dashboard"),method = Array(RequestMethod.POST)) def dashboard(userLogin:UserLogin,model:Model) = { model.addAttribute("userLogin",userLogin) println("email frm header"+userLogin.getEmail()) "homepage" } @RequestMapping(value=Array("/bikeshare"),method = Array(RequestMethod.GET)) def bikesharePageRedirect(model:Model) = { "redirect:/bikeshare" } }
komaldedhia/cmpe-273-project
src/main/scala/com/sjsu/bikeshare/web/UserController.scala
Scala
mit
5,491
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.api import java.util.Properties import kafka.server.KafkaConfig import kafka.utils.{JaasTestUtils, TestUtils} import kafka.zk.ConfigEntityChangeNotificationZNode import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, ScramCredentialInfo, UserScramCredentialAlteration, UserScramCredentialUpsertion, ScramMechanism => PublicScramMechanism} import org.apache.kafka.common.config.SaslConfigs import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol} import org.apache.kafka.common.security.scram.internals.ScramMechanism import org.apache.kafka.common.security.token.delegation.DelegationToken import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} import scala.jdk.CollectionConverters._ class DelegationTokenEndToEndAuthorizationTest extends EndToEndAuthorizationTest { val kafkaClientSaslMechanism = "SCRAM-SHA-256" val kafkaServerSaslMechanisms = ScramMechanism.mechanismNames.asScala.toList override protected def securityProtocol = SecurityProtocol.SASL_SSL override protected val serverSaslProperties = Some(kafkaServerSaslProperties(kafkaServerSaslMechanisms, kafkaClientSaslMechanism)) override protected val clientSaslProperties = Some(kafkaClientSaslProperties(kafkaClientSaslMechanism)) override val clientPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, JaasTestUtils.KafkaScramUser) private val clientPassword = JaasTestUtils.KafkaScramPassword override val kafkaPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, JaasTestUtils.KafkaScramAdmin) private val kafkaPassword = JaasTestUtils.KafkaScramAdminPassword private val privilegedAdminClientConfig = new Properties() this.serverConfig.setProperty(KafkaConfig.DelegationTokenSecretKeyProp, "testKey") override def configureSecurityBeforeServersStart(): Unit = { super.configureSecurityBeforeServersStart() zkClient.makeSurePersistentPathExists(ConfigEntityChangeNotificationZNode.path) // Create broker admin credentials before starting brokers createScramCredentials(zkConnect, kafkaPrincipal.getName, kafkaPassword) } override def createPrivilegedAdminClient() = createScramAdminClient(kafkaClientSaslMechanism, kafkaPrincipal.getName, kafkaPassword) override def configureSecurityAfterServersStart(): Unit = { super.configureSecurityAfterServersStart() // create scram credential for user "scram-user" createScramCredentialsViaPrivilegedAdminClient(clientPrincipal.getName, clientPassword) waitForUserScramCredentialToAppearOnAllBrokers(clientPrincipal.getName, kafkaClientSaslMechanism) //create a token with "scram-user" credentials and a privileged token with scram-admin credentials val tokens = createDelegationTokens() val token = tokens._1 val privilegedToken = tokens._2 
privilegedAdminClientConfig.putAll(adminClientConfig) // pass token to client jaas config val clientLoginContext = JaasTestUtils.tokenClientLoginModule(token.tokenInfo().tokenId(), token.hmacAsBase64String()) producerConfig.put(SaslConfigs.SASL_JAAS_CONFIG, clientLoginContext) consumerConfig.put(SaslConfigs.SASL_JAAS_CONFIG, clientLoginContext) adminClientConfig.put(SaslConfigs.SASL_JAAS_CONFIG, clientLoginContext) val privilegedClientLoginContext = JaasTestUtils.tokenClientLoginModule(privilegedToken.tokenInfo().tokenId(), privilegedToken.hmacAsBase64String()) privilegedAdminClientConfig.put(SaslConfigs.SASL_JAAS_CONFIG, privilegedClientLoginContext) } @Test def testCreateUserWithDelegationToken(): Unit = { val privilegedAdminClient = Admin.create(privilegedAdminClientConfig) try { val user = "user" val results = privilegedAdminClient.alterUserScramCredentials(List[UserScramCredentialAlteration]( new UserScramCredentialUpsertion(user, new ScramCredentialInfo(PublicScramMechanism.SCRAM_SHA_256, 4096), "password")).asJava) assertEquals(1, results.values.size) val future = results.values.get(user) future.get // make sure we haven't completed exceptionally } finally { privilegedAdminClient.close() } } @BeforeEach override def setUp(testInfo: TestInfo): Unit = { startSasl(jaasSections(kafkaServerSaslMechanisms, Option(kafkaClientSaslMechanism), Both)) super.setUp(testInfo) privilegedAdminClientConfig.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()) } private def createDelegationTokens(): (DelegationToken, DelegationToken) = { val adminClient = createScramAdminClient(kafkaClientSaslMechanism, clientPrincipal.getName, clientPassword) try { val privilegedAdminClient = createScramAdminClient(kafkaClientSaslMechanism, kafkaPrincipal.getName, kafkaPassword) try { val token = adminClient.createDelegationToken().delegationToken().get() val privilegedToken = privilegedAdminClient.createDelegationToken().delegationToken().get() //wait for tokens to reach all the brokers TestUtils.waitUntilTrue(() => servers.forall(server => server.tokenCache.tokens().size() == 2), "Timed out waiting for token to propagate to all servers") (token, privilegedToken) } finally { privilegedAdminClient.close() } } finally { adminClient.close() } } }
TiVo/kafka
core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationTest.scala
Scala
apache-2.0
6,161
package org.jetbrains.sbt package codeInspection import com.intellij.codeInspection.{ProblemHighlightType, ProblemsHolder} import com.intellij.openapi.project.Project import com.intellij.psi.PsiElement import org.jetbrains.plugins.scala.codeInspection.{AbstractFixOnPsiElement, AbstractInspection} import org.jetbrains.plugins.scala.lang.psi.api.ScalaRecursiveElementVisitor import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScReferencePattern import org.jetbrains.plugins.scala.lang.psi.api.expr.ScMethodCall import org.jetbrains.plugins.scala.lang.psi.api.statements.ScPatternDefinition import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory import org.jetbrains.plugins.scala.lang.psi.impl.base.ScLiteralImpl /** * @author Nikolay Obedin * @since 8/5/14. */ class SbtReplaceProjectWithProjectInInspection extends AbstractInspection { def actionFor(holder: ProblemsHolder): PartialFunction[PsiElement, Any] = { case defn: ScPatternDefinition if defn.getContainingFile.getFileType.getName == Sbt.Name => (defn.expr, defn.bindings) match { case (Some(call: ScMethodCall), Seq(projectNamePattern: ScReferencePattern)) => findPlaceToFix(call, projectNamePattern.getText).foreach { place => holder.registerProblem(place, SbtBundle("sbt.inspection.projectIn.name"), ProblemHighlightType.GENERIC_ERROR_OR_WARNING, new SbtReplaceProjectWithProjectInQuickFix(place)) } case _ => // do nothing } } private def findPlaceToFix(call: ScMethodCall, projectName: String): Option[ScMethodCall] = { var placeToFix: Option[ScMethodCall] = None val visitor = new ScalaRecursiveElementVisitor { override def visitMethodCallExpression(call: ScMethodCall) = call match { case ScMethodCall(expr, Seq(ScLiteralImpl.string(name), _)) if expr.getText == "Project" && name == projectName => placeToFix = Some(call) case _ => super.visitMethodCallExpression(call) } } call.accept(visitor) placeToFix } } class SbtReplaceProjectWithProjectInQuickFix(call: ScMethodCall) extends AbstractFixOnPsiElement(SbtBundle("sbt.inspection.projectIn.name"), call) { def doApplyFix(project: Project) = { val place = getElement place match { case ScMethodCall(_, Seq(_, pathElt)) => place.replace(ScalaPsiElementFactory.createExpressionFromText("project.in(" + pathElt.getText + ")", place.getManager)) case _ => // do nothing } } }
triggerNZ/intellij-scala
src/org/jetbrains/sbt/codeInspection/SbtReplaceProjectWithProjectInInspection.scala
Scala
apache-2.0
2,609
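An illustrative build.sbt fragment showing the rewrite that this inspection's quick fix performs; the project name and path are invented.

// Before (flagged because the string literal repeats the val's own name):
//   lazy val util = Project("util", file("util"))
//
// After applying the quick fix (the second argument is kept verbatim inside .in(...)):
lazy val util = project.in(file("util"))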
/*
 * Copyright 2021 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package uk.gov.hmrc.ct.ct600.v3

import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.ct600.v3.retriever.CT600BoxRetriever

case class B150(value: Option[Boolean]) extends CtBoxIdentifier("Banks, building societies, insurance companies and other financial concerns")
  with CtOptionalBoolean
  with Input
  with ValidatableBox[CT600BoxRetriever] {

  override def validate(boxRetriever: CT600BoxRetriever): Set[CtValidation] =
    validateBooleanAsMandatory("B150", this)
}
hmrc/ct-calculations
src/main/scala/uk/gov/hmrc/ct/ct600/v3/B150.scala
Scala
apache-2.0
1,065
package example

trait Show[-A]:
  extension (a: A) def show: String

given (using rec: Show[String]): Show[String] = ??? // must be Show[String] as the argument

given (using rec: => Show[String]): Show[Option[String]] = ??? // must be byname argument

def test = Option("").show
dotty-staging/dotty
tests/pos/i8182.scala
Scala
apache-2.0
281
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark import java.io.File import java.net.{MalformedURLException, URI} import java.nio.charset.StandardCharsets import java.util.concurrent.TimeUnit import scala.concurrent.duration._ import scala.concurrent.Await import com.google.common.io.Files import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.{FileSystem, Path} import org.apache.hadoop.io.{BytesWritable, LongWritable, Text} import org.apache.hadoop.mapred.TextInputFormat import org.apache.hadoop.mapreduce.lib.input.{TextInputFormat => NewTextInputFormat} import org.scalatest.concurrent.Eventually import org.scalatest.Matchers._ import org.apache.spark.scheduler.{SparkListener, SparkListenerJobStart, SparkListenerTaskEnd, SparkListenerTaskStart} import org.apache.spark.util.Utils class SparkContextSuite extends SparkFunSuite with LocalSparkContext with Eventually { test("Only one SparkContext may be active at a time") { // Regression test for SPARK-4180 val conf = new SparkConf().setAppName("test").setMaster("local") .set("spark.driver.allowMultipleContexts", "false") sc = new SparkContext(conf) val envBefore = SparkEnv.get // A SparkContext is already running, so we shouldn't be able to create a second one intercept[SparkException] { new SparkContext(conf) } val envAfter = SparkEnv.get // SparkEnv and other context variables should be the same assert(envBefore == envAfter) // After stopping the running context, we should be able to create a new one resetSparkContext() sc = new SparkContext(conf) } test("Can still construct a new SparkContext after failing to construct a previous one") { val conf = new SparkConf().set("spark.driver.allowMultipleContexts", "false") // This is an invalid configuration (no app name or master URL) intercept[SparkException] { new SparkContext(conf) } // Even though those earlier calls failed, we should still be able to create a new context sc = new SparkContext(conf.setMaster("local").setAppName("test")) } test("Check for multiple SparkContexts can be disabled via undocumented debug option") { var secondSparkContext: SparkContext = null try { val conf = new SparkConf().setAppName("test").setMaster("local") .set("spark.driver.allowMultipleContexts", "true") sc = new SparkContext(conf) secondSparkContext = new SparkContext(conf) } finally { Option(secondSparkContext).foreach(_.stop()) } } test("Test getOrCreate") { var sc2: SparkContext = null SparkContext.clearActiveContext() val conf = new SparkConf().setAppName("test").setMaster("local") sc = SparkContext.getOrCreate(conf) assert(sc.getConf.get("spark.app.name").equals("test")) sc2 = SparkContext.getOrCreate(new SparkConf().setAppName("test2").setMaster("local")) assert(sc2.getConf.get("spark.app.name").equals("test")) assert(sc === sc2) assert(sc eq 
sc2) // Try creating second context to confirm that it's still possible, if desired sc2 = new SparkContext(new SparkConf().setAppName("test3").setMaster("local") .set("spark.driver.allowMultipleContexts", "true")) sc2.stop() } test("BytesWritable implicit conversion is correct") { // Regression test for SPARK-3121 val bytesWritable = new BytesWritable() val inputArray = (1 to 10).map(_.toByte).toArray bytesWritable.set(inputArray, 0, 10) bytesWritable.set(inputArray, 0, 5) val converter = WritableConverter.bytesWritableConverter() val byteArray = converter.convert(bytesWritable) assert(byteArray.length === 5) bytesWritable.set(inputArray, 0, 0) val byteArray2 = converter.convert(bytesWritable) assert(byteArray2.length === 0) } test("basic case for addFile and listFiles") { val dir = Utils.createTempDir() val file1 = File.createTempFile("someprefix1", "somesuffix1", dir) val absolutePath1 = file1.getAbsolutePath val file2 = File.createTempFile("someprefix2", "somesuffix2", dir) val relativePath = file2.getParent + "/../" + file2.getParentFile.getName + "/" + file2.getName val absolutePath2 = file2.getAbsolutePath try { Files.write("somewords1", file1, StandardCharsets.UTF_8) Files.write("somewords2", file2, StandardCharsets.UTF_8) val length1 = file1.length() val length2 = file2.length() sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local")) sc.addFile(file1.getAbsolutePath) sc.addFile(relativePath) sc.parallelize(Array(1), 1).map(x => { val gotten1 = new File(SparkFiles.get(file1.getName)) val gotten2 = new File(SparkFiles.get(file2.getName)) if (!gotten1.exists()) { throw new SparkException("file doesn't exist : " + absolutePath1) } if (!gotten2.exists()) { throw new SparkException("file doesn't exist : " + absolutePath2) } if (length1 != gotten1.length()) { throw new SparkException( s"file has different length $length1 than added file ${gotten1.length()} : " + absolutePath1) } if (length2 != gotten2.length()) { throw new SparkException( s"file has different length $length2 than added file ${gotten2.length()} : " + absolutePath2) } if (absolutePath1 == gotten1.getAbsolutePath) { throw new SparkException("file should have been copied :" + absolutePath1) } if (absolutePath2 == gotten2.getAbsolutePath) { throw new SparkException("file should have been copied : " + absolutePath2) } x }).count() assert(sc.listFiles().filter(_.contains("somesuffix1")).size == 1) } finally { sc.stop() } } test("add and list jar files") { val jarPath = Thread.currentThread().getContextClassLoader.getResource("TestUDTF.jar") try { sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local")) sc.addJar(jarPath.toString) assert(sc.listJars().filter(_.contains("TestUDTF.jar")).size == 1) } finally { sc.stop() } } test("SPARK-17650: malformed url's throw exceptions before bricking Executors") { try { sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local")) Seq("http", "https", "ftp").foreach { scheme => val badURL = s"$scheme://user:pwd/path" val e1 = intercept[MalformedURLException] { sc.addFile(badURL) } assert(e1.getMessage.contains(badURL)) val e2 = intercept[MalformedURLException] { sc.addJar(badURL) } assert(e2.getMessage.contains(badURL)) assert(sc.addedFiles.isEmpty) assert(sc.addedJars.isEmpty) } } finally { sc.stop() } } test("addFile recursive works") { val pluto = Utils.createTempDir() val neptune = Utils.createTempDir(pluto.getAbsolutePath) val saturn = Utils.createTempDir(neptune.getAbsolutePath) val alien1 = File.createTempFile("alien", "1", 
neptune) val alien2 = File.createTempFile("alien", "2", saturn) try { sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local")) sc.addFile(neptune.getAbsolutePath, true) sc.parallelize(Array(1), 1).map(x => { val sep = File.separator if (!new File(SparkFiles.get(neptune.getName + sep + alien1.getName)).exists()) { throw new SparkException("can't access file under root added directory") } if (!new File(SparkFiles.get(neptune.getName + sep + saturn.getName + sep + alien2.getName)) .exists()) { throw new SparkException("can't access file in nested directory") } if (new File(SparkFiles.get(pluto.getName + sep + neptune.getName + sep + alien1.getName)) .exists()) { throw new SparkException("file exists that shouldn't") } x }).count() } finally { sc.stop() } } test("addFile recursive can't add directories by default") { val dir = Utils.createTempDir() try { sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local")) intercept[SparkException] { sc.addFile(dir.getAbsolutePath) } } finally { sc.stop() } } test("cannot call addFile with different paths that have the same filename") { val dir = Utils.createTempDir() try { val subdir1 = new File(dir, "subdir1") val subdir2 = new File(dir, "subdir2") assert(subdir1.mkdir()) assert(subdir2.mkdir()) val file1 = new File(subdir1, "file") val file2 = new File(subdir2, "file") Files.write("old", file1, StandardCharsets.UTF_8) Files.write("new", file2, StandardCharsets.UTF_8) sc = new SparkContext("local-cluster[1,1,1024]", "test") sc.addFile(file1.getAbsolutePath) def getAddedFileContents(): String = { sc.parallelize(Seq(0)).map { _ => scala.io.Source.fromFile(SparkFiles.get("file")).mkString }.first() } assert(getAddedFileContents() === "old") intercept[IllegalArgumentException] { sc.addFile(file2.getAbsolutePath) } assert(getAddedFileContents() === "old") } finally { Utils.deleteRecursively(dir) } } // Regression tests for SPARK-16787 for ( schedulingMode <- Seq("local-mode", "non-local-mode"); method <- Seq("addJar", "addFile") ) { val jarPath = Thread.currentThread().getContextClassLoader.getResource("TestUDTF.jar").toString val master = schedulingMode match { case "local-mode" => "local" case "non-local-mode" => "local-cluster[1,1,1024]" } test(s"$method can be called twice with same file in $schedulingMode (SPARK-16787)") { sc = new SparkContext(master, "test") method match { case "addJar" => sc.addJar(jarPath) sc.addJar(jarPath) case "addFile" => sc.addFile(jarPath) sc.addFile(jarPath) } } } test("add jar with invalid path") { val tmpDir = Utils.createTempDir() val tmpJar = File.createTempFile("test", ".jar", tmpDir) sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local")) sc.addJar(tmpJar.getAbsolutePath) // Invaid jar path will only print the error log, will not add to file server. sc.addJar("dummy.jar") sc.addJar("") sc.addJar(tmpDir.getAbsolutePath) sc.listJars().size should be (1) sc.listJars().head should include (tmpJar.getName) } test("Cancelling job group should not cause SparkContext to shutdown (SPARK-6414)") { try { sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local")) val future = sc.parallelize(Seq(0)).foreachAsync(_ => {Thread.sleep(1000L)}) sc.cancelJobGroup("nonExistGroupId") Await.ready(future, Duration(2, TimeUnit.SECONDS)) // In SPARK-6414, sc.cancelJobGroup will cause NullPointerException and cause // SparkContext to shutdown, so the following assertion will fail. 
assert(sc.parallelize(1 to 10).count() == 10L) } finally { sc.stop() } } test("Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)") { // Regression test for SPARK-7155 // dir1 and dir2 are used for wholeTextFiles and binaryFiles val dir1 = Utils.createTempDir() val dir2 = Utils.createTempDir() val dirpath1 = dir1.getAbsolutePath val dirpath2 = dir2.getAbsolutePath // file1 and file2 are placed inside dir1, they are also used for // textFile, hadoopFile, and newAPIHadoopFile // file3, file4 and file5 are placed inside dir2, they are used for // textFile, hadoopFile, and newAPIHadoopFile as well val file1 = new File(dir1, "part-00000") val file2 = new File(dir1, "part-00001") val file3 = new File(dir2, "part-00000") val file4 = new File(dir2, "part-00001") val file5 = new File(dir2, "part-00002") val filepath1 = file1.getAbsolutePath val filepath2 = file2.getAbsolutePath val filepath3 = file3.getAbsolutePath val filepath4 = file4.getAbsolutePath val filepath5 = file5.getAbsolutePath try { // Create 5 text files. Files.write("someline1 in file1\nsomeline2 in file1\nsomeline3 in file1", file1, StandardCharsets.UTF_8) Files.write("someline1 in file2\nsomeline2 in file2", file2, StandardCharsets.UTF_8) Files.write("someline1 in file3", file3, StandardCharsets.UTF_8) Files.write("someline1 in file4\nsomeline2 in file4", file4, StandardCharsets.UTF_8) Files.write("someline1 in file2\nsomeline2 in file5", file5, StandardCharsets.UTF_8) sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local")) // Test textFile, hadoopFile, and newAPIHadoopFile for file1 and file2 assert(sc.textFile(filepath1 + "," + filepath2).count() == 5L) assert(sc.hadoopFile(filepath1 + "," + filepath2, classOf[TextInputFormat], classOf[LongWritable], classOf[Text]).count() == 5L) assert(sc.newAPIHadoopFile(filepath1 + "," + filepath2, classOf[NewTextInputFormat], classOf[LongWritable], classOf[Text]).count() == 5L) // Test textFile, hadoopFile, and newAPIHadoopFile for file3, file4, and file5 assert(sc.textFile(filepath3 + "," + filepath4 + "," + filepath5).count() == 5L) assert(sc.hadoopFile(filepath3 + "," + filepath4 + "," + filepath5, classOf[TextInputFormat], classOf[LongWritable], classOf[Text]).count() == 5L) assert(sc.newAPIHadoopFile(filepath3 + "," + filepath4 + "," + filepath5, classOf[NewTextInputFormat], classOf[LongWritable], classOf[Text]).count() == 5L) // Test wholeTextFiles, and binaryFiles for dir1 and dir2 assert(sc.wholeTextFiles(dirpath1 + "," + dirpath2).count() == 5L) assert(sc.binaryFiles(dirpath1 + "," + dirpath2).count() == 5L) } finally { sc.stop() } } test("Default path for file based RDDs is properly set (SPARK-12517)") { sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local")) // Test filetextFile, wholeTextFiles, binaryFiles, hadoopFile and // newAPIHadoopFile for setting the default path as the RDD name val mockPath = "default/path/for/" var targetPath = mockPath + "textFile" assert(sc.textFile(targetPath).name === targetPath) targetPath = mockPath + "wholeTextFiles" assert(sc.wholeTextFiles(targetPath).name === targetPath) targetPath = mockPath + "binaryFiles" assert(sc.binaryFiles(targetPath).name === targetPath) targetPath = mockPath + "hadoopFile" assert(sc.hadoopFile(targetPath).name === targetPath) targetPath = mockPath + "newAPIHadoopFile" assert(sc.newAPIHadoopFile(targetPath).name === targetPath) sc.stop() } test("calling multiple sc.stop() must not throw any exception") { noException should be thrownBy { sc = new 
SparkContext(new SparkConf().setAppName("test").setMaster("local")) val cnt = sc.parallelize(1 to 4).count() sc.cancelAllJobs() sc.stop() // call stop second time sc.stop() } } test("No exception when both num-executors and dynamic allocation set.") { noException should be thrownBy { sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local") .set("spark.dynamicAllocation.enabled", "true").set("spark.executor.instances", "6")) assert(sc.executorAllocationManager.isEmpty) assert(sc.getConf.getInt("spark.executor.instances", 0) === 6) } } test("localProperties are inherited by spawned threads.") { sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local")) sc.setLocalProperty("testProperty", "testValue") var result = "unset"; val thread = new Thread() { override def run() = {result = sc.getLocalProperty("testProperty")}} thread.start() thread.join() sc.stop() assert(result == "testValue") } test("localProperties do not cross-talk between threads.") { sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local")) var result = "unset"; val thread1 = new Thread() { override def run() = {sc.setLocalProperty("testProperty", "testValue")}} // testProperty should be unset and thus return null val thread2 = new Thread() { override def run() = {result = sc.getLocalProperty("testProperty")}} thread1.start() thread1.join() thread2.start() thread2.join() sc.stop() assert(result == null) } test("log level case-insensitive and reset log level") { sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local")) val originalLevel = org.apache.log4j.Logger.getRootLogger().getLevel try { sc.setLogLevel("debug") assert(org.apache.log4j.Logger.getRootLogger().getLevel === org.apache.log4j.Level.DEBUG) sc.setLogLevel("INfo") assert(org.apache.log4j.Logger.getRootLogger().getLevel === org.apache.log4j.Level.INFO) } finally { sc.setLogLevel(originalLevel.toString) assert(org.apache.log4j.Logger.getRootLogger().getLevel === originalLevel) sc.stop() } } test("register and deregister Spark listener from SparkContext") { sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local")) val sparkListener1 = new SparkListener { } val sparkListener2 = new SparkListener { } sc.addSparkListener(sparkListener1) sc.addSparkListener(sparkListener2) assert(sc.listenerBus.listeners.contains(sparkListener1)) assert(sc.listenerBus.listeners.contains(sparkListener2)) sc.removeSparkListener(sparkListener1) assert(!sc.listenerBus.listeners.contains(sparkListener1)) assert(sc.listenerBus.listeners.contains(sparkListener2)) } test("Cancelling stages/jobs with custom reasons.") { sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local")) val REASON = "You shall not pass" val listener = new SparkListener { override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = { if (SparkContextSuite.cancelStage) { eventually(timeout(10.seconds)) { assert(SparkContextSuite.isTaskStarted) } sc.cancelStage(taskStart.stageId, REASON) SparkContextSuite.cancelStage = false } } override def onJobStart(jobStart: SparkListenerJobStart): Unit = { if (SparkContextSuite.cancelJob) { eventually(timeout(10.seconds)) { assert(SparkContextSuite.isTaskStarted) } sc.cancelJob(jobStart.jobId, REASON) SparkContextSuite.cancelJob = false } } } sc.addSparkListener(listener) for (cancelWhat <- Seq("stage", "job")) { SparkContextSuite.isTaskStarted = false SparkContextSuite.cancelStage = (cancelWhat == "stage") SparkContextSuite.cancelJob = (cancelWhat == "job") val ex = 
intercept[SparkException] { sc.range(0, 10000L).mapPartitions { x => org.apache.spark.SparkContextSuite.isTaskStarted = true x }.cartesian(sc.range(0, 10L))count() } ex.getCause() match { case null => assert(ex.getMessage().contains(REASON)) case cause: SparkException => assert(cause.getMessage().contains(REASON)) case cause: Throwable => fail("Expected the cause to be SparkException, got " + cause.toString() + " instead.") } eventually(timeout(20.seconds)) { assert(sc.statusTracker.getExecutorInfos.map(_.numRunningTasks()).sum == 0) } } } // Launches one task that will run forever. Once the SparkListener detects the task has // started, kill and re-schedule it. The second run of the task will complete immediately. // If this test times out, then the first version of the task wasn't killed successfully. test("Killing tasks") { sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local")) SparkContextSuite.isTaskStarted = false SparkContextSuite.taskKilled = false SparkContextSuite.taskSucceeded = false val listener = new SparkListener { override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = { eventually(timeout(10.seconds)) { assert(SparkContextSuite.isTaskStarted) } if (!SparkContextSuite.taskKilled) { SparkContextSuite.taskKilled = true sc.killTaskAttempt(taskStart.taskInfo.taskId, true, "first attempt will hang") } } override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = { if (taskEnd.taskInfo.attemptNumber == 1 && taskEnd.reason == Success) { SparkContextSuite.taskSucceeded = true } } } sc.addSparkListener(listener) eventually(timeout(20.seconds)) { sc.parallelize(1 to 1).foreach { x => // first attempt will hang if (!SparkContextSuite.isTaskStarted) { SparkContextSuite.isTaskStarted = true try { Thread.sleep(9999999) } catch { case t: Throwable => // SPARK-20217 should not fail stage if task throws non-interrupted exception throw new RuntimeException("killed") } } // second attempt succeeds immediately } } eventually(timeout(10.seconds)) { assert(SparkContextSuite.taskSucceeded) } } test("SPARK-19446: DebugFilesystem.assertNoOpenStreams should report " + "open streams to help debugging") { val fs = new DebugFilesystem() fs.initialize(new URI("file:///"), new Configuration()) val file = File.createTempFile("SPARK19446", "temp") Files.write(Array.ofDim[Byte](1000), file) val path = new Path("file:///" + file.getCanonicalPath) val stream = fs.open(path) val exc = intercept[RuntimeException] { DebugFilesystem.assertNoOpenStreams() } assert(exc != null) assert(exc.getCause() != null) stream.close() } } object SparkContextSuite { @volatile var cancelJob = false @volatile var cancelStage = false @volatile var isTaskStarted = false @volatile var taskKilled = false @volatile var taskSucceeded = false }
JerryLead/spark
core/src/test/scala/org/apache/spark/SparkContextSuite.scala
Scala
apache-2.0
23,277
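The tests above exercise SparkContext end to end; as a quick illustration of the comma-separated path support they verify (SPARK-7155), here is a minimal standalone sketch. The file and directory names (a.txt, b.txt, dir1, dir2) are placeholders assumed to exist; only the textFile and wholeTextFiles calls already shown in the suite are used.

import org.apache.spark.{SparkConf, SparkContext}

object CommaSeparatedPathsSketch {
  def main(args: Array[String]): Unit = {
    // Local SparkContext, mirroring the suite's setup.
    val sc = new SparkContext(new SparkConf().setAppName("sketch").setMaster("local"))
    try {
      // textFile accepts a comma-separated list of paths; the resulting RDD is
      // the union of the lines of all listed files (assumed to exist).
      val lines = sc.textFile("a.txt,b.txt")
      println(s"lines: ${lines.count()}")

      // wholeTextFiles takes comma-separated directories and yields (path, content) pairs.
      val files = sc.wholeTextFiles("dir1,dir2")
      println(s"files: ${files.count()}")
    } finally {
      sc.stop()
    }
  }
}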
// Copyright (c) 2017 Philipp Meyerhoefer // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package uk.co.faydark.mrmocks import uk.co.faydark.mrmocks.helpers.SumReducer import org.apache.hadoop.io.{LongWritable, Text} import org.apache.log4j.BasicConfigurator import org.scalatest.{BeforeAndAfterAll, FlatSpec} class ReduceDriverTest extends FlatSpec with BeforeAndAfterAll { behavior of "ReduceDriver" val a = new Text("a") val b = new Text("b") val one = new LongWritable(1L) val two = new LongWritable(2L) val five = new LongWritable(5L) val seven = new LongWritable(7L) val eleven = new LongWritable(11L) it should "work with SumReducer" in new ReduceDriver[Text, LongWritable, Text, LongWritable, SumReducer]( input = Seq(a -> one, a -> two, a -> five, b -> seven, b -> eleven), expectedOutput = Seq(a -> new LongWritable(8L), b -> new LongWritable(18L)) ).verify() it should "work with SumReducer 2" in new ReduceDriver( input = Seq(a -> one, a -> two, a -> five, b -> seven, b -> eleven), expectedOutput = Seq(a -> new LongWritable(8L), b -> new LongWritable(18L)) )(Manifest.classType(classOf[SumReducer])). verify() override protected def beforeAll() = BasicConfigurator.resetConfiguration() }
barkhorn/mrmocks
src/test/scala/uk/co/faydark/mrmocks/ReduceDriverTest.scala
Scala
mit
2,305
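The test above drives a SumReducer helper whose source is not included in this excerpt; the sketch below is one plausible implementation (an assumption, not the repository's code) of the per-key summing behaviour the expected output implies, written against the standard Hadoop Reducer API.

import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapreduce.Reducer
import scala.collection.JavaConverters._

class SumReducerSketch extends Reducer[Text, LongWritable, Text, LongWritable] {
  override def reduce(key: Text,
                      values: java.lang.Iterable[LongWritable],
                      context: Reducer[Text, LongWritable, Text, LongWritable]#Context): Unit = {
    // Sum all values seen for this key and emit a single (key, total) pair,
    // e.g. a -> 1, 2, 5 becomes a -> 8 as asserted in the test.
    val total = values.asScala.map(_.get()).sum
    context.write(key, new LongWritable(total))
  }
}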
package com.ubeeko.htalk.criteria import com.ubeeko.htalk.bytesconv._ import com.ubeeko.htalk.hbase.{HTalkContext, TestHBaseManager} import com.ubeeko.htalk.tests.TestCommons import org.apache.hadoop.hbase.filter.ColumnPrefixFilter import org.specs2.mutable.Specification class ResultSpec extends Specification { val table = "result-spec-test" implicit val hTalkContext = TestCommons.newContext() hTalkContext.createTable(table) val translations: Map[String, Map[String, String]] = Map( "one" -> Map("fr" -> "un" , "es" -> "uno"), "two" -> Map("fr" -> "deux", "es" -> "dos") ) translations foreach { case (numberInEnglish, langMap) => langMap foreach { case (lang, numberInLang) => table.put(numberInEnglish, lang, numberInLang).execute } } "getCells" should { "return an empty map if the result is empty" in { val r = table.get("non-existent-rowkey").execute.head r.isEmpty must beTrue r.getCells() must beEmpty } "return the columns of the specified family" in { val number = "one" val r = table.get(number).execute.head r.nonEmpty must beTrue val cells = r.getCells() // Contains exactly two cells and these cells must match the // data initially stored above. cells.size must_== 2 val conv = bytesTo[String] _ val stringCells = cells map { case (qualifier, value) => (conv(qualifier), conv(value)) } val langs = translations(number) langs forall { case (lang, numberInLang) => stringCells.get(lang) must beSome(numberInLang) } } } }
eric-leblouch/htalk
src/test/scala/com/ubeeko/htalk/criteria/ResultSpec.scala
Scala
apache-2.0
1,618
package com.sksamuel.scapegoat.inspections.unneccesary import scala.reflect.internal.Flags import com.sksamuel.scapegoat._ /** * @author * Stephen Samuel */ class UnusedMethodParameter extends Inspection( text = "Unused parameter", defaultLevel = Levels.Warning, description = "Checks for unused method parameters.", explanation = "Unused constructor or method parameters should be removed." ) { def inspector(context: InspectionContext): Inspector = new Inspector(context) { override def postTyperTraverser: context.Traverser = new context.Traverser { import context.global._ import definitions._ private def usesParameter(paramName: String, tree: Tree): Boolean = { tree match { case Ident(TermName(name)) => name == paramName case _ => tree.children.exists(usesParameter(paramName, _)) } } private def usesField(paramName: String, tree: Tree): Boolean = { tree match { case Select(This(_), TermName(name)) => // FIXME: why is "trim" needed here? Is that a scalac bug? // A test will fail if you take this out! name.trim == paramName case _ => tree.children.exists(usesField(paramName, _)) } } private def isParameterExcused(param: ValDef): Boolean = param.symbol.annotations.exists(_.atp.toString == "scala.annotation.unused") /** * For constructor params, some params become vals / fields of the class: * 1. all params in the first argument list for case classes * 1. all params marked "val" * * In both cases, by the time we see the tree, a "def x = this.x" method will have been added by the * compiler, so "usesField" will notice and not mark the param as unused. */ private def checkConstructor( vparamss: List[List[ValDef]], constructorBody: Tree, classBody: Tree ): Unit = { for { vparams <- vparamss vparam <- vparams } { val paramName = vparam.name.toString if (!usesParameter(paramName, constructorBody) && !usesField(paramName, classBody)) context.warn(vparam.pos, self, s"Unused constructor parameter (${vparam.name}).") } } override def inspect(tree: Tree): Unit = { tree match { // ignore traits, quite often you define a method in a trait with default impl that does nothing case ClassDef(_, _, _, _) if tree.symbol.isTrait => case ClassDef(mods, _, _, _) if mods.hasAbstractFlag => case ClassDef(_, _, _, classBody @ Template(_, _, classTopLevelStmts)) => classTopLevelStmts.foreach { case DefDef(_, nme.CONSTRUCTOR, _, vparamss, _, constructorBody) => checkConstructor(vparamss, constructorBody, classBody) case DefDef(_, _, _, vparamss, _, constructorBody) if tree.symbol != null && tree.symbol.isConstructor => checkConstructor(vparamss, constructorBody, classBody) case _ => } continue(tree) // ignore abstract methods obv. case DefDef(mods, _, _, _, _, _) if mods.hasFlag(Flag.ABSTRACT) => case d @ DefDef(_, _, _, _, _, _) if d.symbol != null && d.symbol.isAbstract => // ignore constructors, they're handled above case DefDef(_, nme.CONSTRUCTOR, _, _, _, _) => case DefDef(_, _, _, _, _, _) if tree.symbol != null && tree.symbol.isConstructor => // ignore methods that just throw, e.g. "???" case DefDef(_, _, _, _, tpt, _) if tpt.tpe =:= NothingTpe => // ignore methods that just throw, e.g. "???" 
or "js.native" case DefDef(_, _, _, _, _, rhs) if rhs.tpe =:= NothingTpe => // ignore overridden methods, the parameter might be used by other classes case DefDef(mods, _, _, _, _, _) if mods.isOverride || mods.hasFlag(Flags.OVERRIDE) || (tree.symbol != null && (tree.symbol.isAnyOverride || tree.symbol.isOverridingSymbol)) => // ignore main method case DefDef(_, name, _, List(List(param)), tpt, _) if name.toString == "main" && param.name.toString == "args" && tpt.tpe =:= UnitTpe && param.tpt.tpe =:= typeOf[Array[String]] => case DefDef(_, _, _, vparamss, _, rhs) => for { vparams <- vparamss vparam <- vparams } if (!isParameterExcused(vparam) && !usesParameter(vparam.name.toString, rhs)) { context.warn(tree.pos, self, s"Unused method parameter ($vparam).") } case _ => continue(tree) } } } } }
sksamuel/scapegoat
src/main/scala/com/sksamuel/scapegoat/inspections/unneccesary/UnusedMethodParameter.scala
Scala
apache-2.0
5,339
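To make the behaviour of the inspection above concrete, the following sketch (hypothetical object and method names) shows the three cases it distinguishes: an unused parameter is warned about, a used parameter is not, and a parameter annotated with scala.annotation.unused is excused via isParameterExcused.

import scala.annotation.unused

object UnusedParameterExamples {
  // Warned: 'b' never occurs in the method body.
  def add(a: Int, b: Int): Int = a + a

  // Not warned: both parameters are referenced.
  def sum(a: Int, b: Int): Int = a + b

  // Not warned: the @unused annotation is recognised by isParameterExcused.
  def first(a: Int, @unused b: Int): Int = a
}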
package io.youi.style object HTMLBorderStyle extends Stringify[HTMLBorderStyle] { case object None extends HTMLBorderStyle("none") case object Hidden extends HTMLBorderStyle("hidden") case object Dotted extends HTMLBorderStyle("dotted") case object Dashed extends HTMLBorderStyle("dashed") case object Solid extends HTMLBorderStyle("solid") case object Double extends HTMLBorderStyle("double") case object Groove extends HTMLBorderStyle("groove") case object Ridge extends HTMLBorderStyle("ridge") case object Inset extends HTMLBorderStyle("inset") case object Outset extends HTMLBorderStyle("outset") private lazy val map = List(None, Hidden, Dotted, Dashed, Solid, Double, Groove, Ridge, Inset, Outset) .map(s => s.value -> s) .toMap override def fromString(value: String): Option[HTMLBorderStyle] = map.get(value) override def toString(value: HTMLBorderStyle): Option[String] = Some(value.value) } sealed abstract class HTMLBorderStyle(val value: String)
outr/youi
ui/js/src/main/scala/io/youi/style/HTMLBorderStyle.scala
Scala
mit
996
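A small usage sketch for the Stringify round-trip defined above; the object name is hypothetical, while the calls are the fromString/toString members shown in the entry.

import io.youi.style.HTMLBorderStyle

object BorderStyleRoundTrip {
  // fromString is a lookup in the prebuilt map; unknown strings yield None.
  val dashed: Option[HTMLBorderStyle] = HTMLBorderStyle.fromString("dashed") // Some(Dashed)
  val unknown: Option[HTMLBorderStyle] = HTMLBorderStyle.fromString("wavy")  // None

  // toString wraps the case object's string value in Some.
  val solid: Option[String] = HTMLBorderStyle.toString(HTMLBorderStyle.Solid) // Some("solid")
}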
package cromwell.engine.db.slick import java.sql.Clob import cromwell.engine.ExecutionIndex import cromwell.engine.ExecutionIndex._ case class Symbol ( workflowExecutionId: Int, scope: String, name: String, index: Int, // https://bugs.mysql.com/bug.php?id=8173 io: String, reportableResult: Boolean, wdlType: String, wdlValue: Option[Clob], symbolHash: Option[String], symbolId: Option[Int] = None ) trait SymbolComponent { this: DriverComponent with WorkflowExecutionComponent => import driver.api._ class Symbols(tag: Tag) extends Table[Symbol](tag, "SYMBOL") { def symbolId = column[Int]("SYMBOL_ID", O.PrimaryKey, O.AutoInc) def workflowExecutionId = column[Int]("WORKFLOW_EXECUTION_ID") def scope = column[String]("SCOPE") def name = column[String]("NAME") def index = column[Int]("INDEX") def io = column[String]("IO") def reportableResult = column[Boolean]("REPORTABLE_RESULT") def wdlType = column[String]("WDL_TYPE") def wdlValue = column[Option[Clob]]("WDL_VALUE") def symbolHash = column[Option[String]]("HASH") override def * = (workflowExecutionId, scope, name, index, io, reportableResult, wdlType, wdlValue, symbolHash, symbolId.?) <> (Symbol.tupled, Symbol.unapply) def workflowExecution = foreignKey( "FK_SYMBOL_WORKFLOW_EXECUTION_ID", workflowExecutionId, workflowExecutions)(_.workflowExecutionId) def uniqueKey = index("UK_SYM_WORKFLOW_EXECUTION_ID_SCOPE_NAME_ITERATION_IO", (workflowExecutionId, scope, name, index, io), unique = true) } protected val symbols = TableQuery[Symbols] val symbolsAutoInc = symbols returning symbols. map(_.symbolId) into ((a, id) => a.copy(symbolId = Some(id))) // Convenience function def symbolsByWorkflowExecutionUuidAndIoAndMaybeScope(workflowExecutionUuid: String, io: String, scopeOption: Option[String], indexOption: Option[Int]) = { scopeOption match { case Some(scope) => symbolsByWorkflowExecutionUuidAndIoAndScopeAndIndex(workflowExecutionUuid, io, scope, indexOption.fromIndex) case None => symbolsByWorkflowExecutionUuidAndIoNoIndex(workflowExecutionUuid, io) } } val allSymbols = Compiled( (workflowExecutionUuid: Rep[String]) => for { symbol <- symbols workflowExecution <- symbol.workflowExecution if workflowExecution.workflowExecutionUuid === workflowExecutionUuid } yield symbol) val symbolsByWorkflowExecutionUuidAndIo = Compiled( (workflowExecutionUuid: Rep[String], io: Rep[String]) => for { symbol <- symbols if symbol.io === io workflowExecution <- symbol.workflowExecution if workflowExecution.workflowExecutionUuid === workflowExecutionUuid } yield symbol) val symbolsByWorkflowExecutionUuidAndIoNoIndex = Compiled( (workflowExecutionUuid: Rep[String], io: Rep[String]) => for { symbol <- symbols if symbol.io === io && symbol.index === ExecutionIndex.IndexNone workflowExecution <- symbol.workflowExecution if workflowExecution.workflowExecutionUuid === workflowExecutionUuid } yield symbol) val symbolsByWorkflowExecutionUuidAndIoAndScopeAndIndex = Compiled( (workflowExecutionUuid: Rep[String], io: Rep[String], scope: Rep[String], index: Rep[Int]) => for { symbol <- symbols if symbol.io === io && symbol.scope === scope && symbol.index === index workflowExecution <- symbol.workflowExecution if workflowExecution.workflowExecutionUuid === workflowExecutionUuid } yield symbol) def symbolsFilterByWorkflowAndScopeAndNameAndIndex(workflowExecutionId: Int, scope: String, name: String, index: Int) = { val workflowFilteredQuery = for { symbol <- symbols if symbol.workflowExecutionId === workflowExecutionId } yield symbol workflowFilteredQuery filter { s => s.scope === 
scope && s.name === name && s.index === index } } val symbolsForWorkflowOutput = Compiled( (workflowExecutionUuid: Rep[String]) => for { symbol <- symbols if symbol.reportableResult === true workflowExecution <- symbol.workflowExecution if workflowExecution.workflowExecutionUuid === workflowExecutionUuid } yield symbol) }
dgtester/cromwell
src/main/scala/cromwell/engine/db/slick/SymbolComponent.scala
Scala
bsd-3-clause
4,305
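The component above relies on Slick's Compiled queries so each parameterised lookup is prepared once and reused; the self-contained sketch below isolates that pattern with a hypothetical Users table, assuming Slick 3.x and the H2 profile on the classpath.

import slick.jdbc.H2Profile.api._

class Users(tag: Tag) extends Table[(Int, String)](tag, "USERS") {
  def id   = column[Int]("ID", O.PrimaryKey)
  def name = column[String]("NAME")
  def * = (id, name)
}

object CompiledQueryExample {
  val users = TableQuery[Users]

  // Compiled once; the parameter is lifted to Rep[String], just like the
  // Rep[...] parameters of the compiled queries in SymbolComponent.
  val byName = Compiled((name: Rep[String]) => users.filter(_.name === name))

  // Applying the compiled function yields a query whose .result is a
  // DBIOAction ready to be run against a Database.
  val action = byName("alice").result
}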
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License */ package org.apache.toree.kernel.protocol.v5.handler import org.apache.toree.comm.{CommRegistrar, CommStorage, KernelCommWriter} import org.apache.toree.kernel.protocol.v5.content.{ShutdownReply, ShutdownRequest, CommOpen} import org.apache.toree.kernel.protocol.v5.kernel.{ActorLoader, Utilities} import org.apache.toree.kernel.protocol.v5._ import org.apache.toree.security.KernelSecurityManager import org.apache.toree.utils.MessageLogSupport import play.api.libs.json.JsonValidationError import play.api.libs.json.JsPath import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future /** * Represents the handler to shutdown the kernel * * @param actorLoader The actor loader to use for actor communication */ class ShutdownHandler( actorLoader: ActorLoader ) extends BaseHandler(actorLoader) with MessageLogSupport { override def process(kernelMessage: KernelMessage): Future[_] = Future { logKernelMessageAction("Initiating Shutdown request for", kernelMessage) val kernelInfo = SparkKernelInfo val shutdownReply = ShutdownReply(false) val replyHeader = Header( java.util.UUID.randomUUID.toString, "", java.util.UUID.randomUUID.toString, ShutdownReply.toTypeString, kernelInfo.protocolVersion) val kernelResponseMessage = KMBuilder() .withIds(kernelMessage.ids) .withSignature("") .withHeader(replyHeader) .withParent(kernelMessage) .withContentString(shutdownReply).build logger.debug("Attempting graceful shutdown.") actorLoader.load(SystemActorType.KernelMessageRelay) ! kernelResponseMessage // Instruct security manager that exit should be allowed KernelSecurityManager.enableRestrictedExit() System.exit(0) } }
lresende/incubator-toree
kernel/src/main/scala/org/apache/toree/kernel/protocol/v5/handler/ShutdownHandler.scala
Scala
apache-2.0
2,584
/* * Copyright 2014–2018 SlamData Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package quasar.compile import slamdata.Predef._ import quasar.{VarName, Variables} import quasar.sql._ import matryoshka.data.Fix import scalaz._, Scalaz._ class VariableSubstitutionSpec extends quasar.Qspec { "list all missing variables" >> { "all are missing" >> { substVars[SemanticErrors \/ ?, Fix](fixParser, sqlE"select * from :foo where :baz", Variables.empty) must_=== NonEmptyList( SemanticError.unboundVariable(VarName("foo")), SemanticError.unboundVariable(VarName("baz"))).left } "some are missing" >> { val vars = Variables.fromMap(Map("baz" -> "age = 7")) substVars[SemanticErrors \/ ?, Fix](fixParser, sqlE"select :biz from :foo where :baz", vars) must_=== NonEmptyList( SemanticError.unboundVariable(VarName("foo")), SemanticError.unboundVariable(VarName("biz"))).left } } "succeed when all variables are present" >> { val vars = Variables.fromMap(Map("biz" -> "name", "foo" -> "people", "baz" -> "age = 7")) substVars[SemanticErrors \/ ?, Fix](fixParser, sqlE"select :biz from :foo where :baz", vars) must_=== sqlE"select name from people where age = 7".right } }
slamdata/slamengine
core/src/test/scala/quasar/compile/VariableSubstitutionSpec.scala
Scala
apache-2.0
1,805
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.openwhisk.core.yarn import akka.actor.{ActorRef, ActorSystem, Props} import akka.http.scaladsl.model.{HttpMethods, StatusCodes} import akka.pattern.ask import akka.util.Timeout import org.apache.openwhisk.common.{Logging, TransactionId} import org.apache.openwhisk.core.containerpool._ import org.apache.openwhisk.core.entity.ExecManifest.ImageName import org.apache.openwhisk.core.entity.{ByteSize, ExecManifest, InvokerInstanceId} import org.apache.openwhisk.core.yarn.YARNComponentActor.CreateContainerAsync import org.apache.openwhisk.core.{ConfigKeys, WhiskConfig} import pureconfig.loadConfigOrThrow import spray.json._ import scala.collection.immutable.HashMap import scala.concurrent.{blocking, ExecutionContext, Future} import scala.concurrent.duration._ import YARNJsonProtocol._ import akka.stream.ActorMaterializer case class YARNConfig(masterUrl: String, yarnLinkLogMessage: Boolean, serviceName: String, authType: String, kerberosPrincipal: String, kerberosKeytab: String, queue: String, memory: String, cpus: Int) object YARNContainerFactoryProvider extends ContainerFactoryProvider { override def instance(actorSystem: ActorSystem, logging: Logging, config: WhiskConfig, instance: InvokerInstanceId, parameters: Map[String, Set[String]]): ContainerFactory = new YARNContainerFactory(actorSystem, logging, config, instance, parameters) } class YARNContainerFactory(actorSystem: ActorSystem, logging: Logging, config: WhiskConfig, instance: InvokerInstanceId, parameters: Map[String, Set[String]], containerArgs: ContainerArgsConfig = loadConfigOrThrow[ContainerArgsConfig](ConfigKeys.containerArgs), yarnConfig: YARNConfig = loadConfigOrThrow[YARNConfig](ConfigKeys.yarn)) extends ContainerFactory { val images: Set[ImageName] = ExecManifest.runtimesManifest.runtimes.flatMap(a => a.versions.map(b => b.image)) //One actor of each type per image for parallelism private var yarnComponentActors: Map[ImageName, ActorRef] = HashMap[ImageName, ActorRef]() private var YARNContainerInfoActors: Map[ImageName, ActorRef] = HashMap[ImageName, ActorRef]() val serviceStartTimeoutMS = 60000 val retryWaitMS = 1000 val runCommand = "" val version = "1.0.0" val description = "OpenWhisk Action Service" //Allows for invoker HA val serviceName: String = yarnConfig.serviceName + "-" + instance.toInt val containerStartTimeoutMS = 60000 implicit val as: ActorSystem = actorSystem implicit val materializer: ActorMaterializer = ActorMaterializer() implicit val ec: ExecutionContext = actorSystem.dispatcher override def init(): Unit = { yarnComponentActors = images .map( i => ( i, actorSystem.actorOf( Props(new YARNComponentActor(actorSystem, logging, yarnConfig, serviceName, i)), name = s"YARNComponentActor-${i.name}"))) .toMap 
YARNContainerInfoActors = images .map( i => ( i, actorSystem.actorOf( Props(new YARNContainerInfoActor(actorSystem, logging, yarnConfig, serviceName, i)), name = s"YARNComponentInfoActor-${i.name}"))) .toMap blocking { implicit val timeout: Timeout = Timeout(serviceStartTimeoutMS.milliseconds) //Remove service if it already exists val serviceDef = YARNRESTUtil.downloadServiceDefinition(yarnConfig.authType, serviceName, yarnConfig.masterUrl)(logging) if (serviceDef != null) removeService() createService() } } override def createContainer( unusedtid: TransactionId, unusedname: String, actionImage: ExecManifest.ImageName, unuseduserProvidedImage: Boolean, unusedmemory: ByteSize, unusedcpuShares: Int)(implicit config: WhiskConfig, logging: Logging): Future[Container] = { implicit val timeout: Timeout = Timeout(containerStartTimeoutMS.milliseconds) //First send the create command to YARN, then with a different actor, wait for the container to be ready ask(yarnComponentActors(actionImage), CreateContainerAsync).flatMap(_ => ask(YARNContainerInfoActors(actionImage), GetContainerInfo(yarnComponentActors(actionImage))).mapTo[Container]) } override def cleanup(): Unit = { removeService() yarnComponentActors foreach { case (k, v) => actorSystem.stop(v) } YARNContainerInfoActors foreach { case (k, v) => actorSystem.stop(v) } } def createService(): Unit = { logging.info(this, "Creating Service with images: " + images.map(i => i.publicImageName).mkString(", ")) val componentList = images .map( i => ComponentDefinition( i.name.replace('.', '-'), //name must be [a-z][a-z0-9-]* Some(0), //start with zero containers Some(runCommand), Option.empty, Some(ArtifactDefinition(i.publicImageName, "DOCKER")), Some(ResourceDefinition(yarnConfig.cpus, yarnConfig.memory)), Some(ConfigurationDefinition(Map(("YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE", "true")))), List[String]())) .toList //Add kerberos def if necessary var kerberosDef: Option[KerberosPrincipalDefinition] = None if (yarnConfig.authType.equals(YARNRESTUtil.KERBEROSAUTH)) kerberosDef = Some( KerberosPrincipalDefinition(Some(yarnConfig.kerberosPrincipal), Some(yarnConfig.kerberosKeytab))) val service = ServiceDefinition( Some(serviceName), Some(version), Some(description), Some("STABLE"), Some(yarnConfig.queue), componentList, kerberosDef) //Submit service val response = YARNRESTUtil.submitRequestWithAuth( yarnConfig.authType, HttpMethods.POST, s"${yarnConfig.masterUrl}/app/v1/services", service.toJson.compactPrint) //Handle response response match { case httpresponse(StatusCodes.OK, content) => logging.info(this, s"Service submitted. Response: $content") case httpresponse(StatusCodes.Accepted, content) => logging.info(this, s"Service submitted. Response: $content") case httpresponse(_, _) => YARNRESTUtil.handleYARNRESTError(logging) } //Wait for service start (up to serviceStartTimeoutMS milliseconds) var started = false var retryCount = 0 val maxRetryCount = serviceStartTimeoutMS / retryWaitMS while (!started && retryCount < maxRetryCount) { val serviceDef = YARNRESTUtil.downloadServiceDefinition(yarnConfig.authType, serviceName, yarnConfig.masterUrl)(logging) if (serviceDef == null) { logging.info(this, "Service not found yet") Thread.sleep(retryWaitMS) } else { serviceDef.state.getOrElse(None) match { case "STABLE" | "STARTED" => logging.info(this, "YARN service achieved stable state") started = true case state => logging.info( this, s"YARN service is not in stable state yet ($retryCount/$maxRetryCount). 
Current state: $state") Thread.sleep(retryWaitMS) } } retryCount += 1 } if (!started) throw new Exception(s"After ${serviceStartTimeoutMS}ms YARN service did not achieve stable state") } def removeService(): Unit = { val response: httpresponse = YARNRESTUtil.submitRequestWithAuth( yarnConfig.authType, HttpMethods.DELETE, s"${yarnConfig.masterUrl}/app/v1/services/$serviceName", "") response match { case httpresponse(StatusCodes.OK, _) => logging.info(this, "YARN service Removed") case httpresponse(StatusCodes.NotFound, _) => logging.warn(this, "YARN service did not exist") case httpresponse(StatusCodes.BadRequest, _) => logging.warn(this, "YARN service did not exist") case httpresponse(_, _) => YARNRESTUtil.handleYARNRESTError(logging) } } }
houshengbo/openwhisk
common/scala/src/main/scala/org/apache/openwhisk/core/yarn/YARNContainerFactory.scala
Scala
apache-2.0
9,263
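createService above blocks until the YARN service reports a stable state, polling with a bounded retry count; the generic sketch below (hypothetical isStable callback, no YARN or Akka dependency) isolates that wait loop.

object PollUntilStable {
  /** Polls isStable every retryWaitMs milliseconds until it returns true or the
    * retry budget derived from timeoutMs is exhausted, mirroring the
    * retryCount/maxRetryCount loop in createService. */
  def waitForStable(isStable: () => Boolean, timeoutMs: Long, retryWaitMs: Long): Boolean = {
    val maxRetries = (timeoutMs / retryWaitMs).toInt
    var retries = 0
    var stable = false
    while (!stable && retries < maxRetries) {
      stable = isStable()
      if (!stable) {
        Thread.sleep(retryWaitMs)
        retries += 1
      }
    }
    stable
  }

  // Example: returns false after ~3 attempts because the check never succeeds.
  // waitForStable(() => false, timeoutMs = 3000L, retryWaitMs = 1000L)
}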
object Test { abstract class Number case class MyInt(n: Int) extends Number case class MyDouble(d: Double) extends Number trait Term[a] case class Cell[a](var x: a) extends Term[a] final case class NumTerm(val n: Number) extends Term[Number] def f[a](t: Term[a], c: Cell[a]) { t match { case NumTerm(n) => c.x = MyDouble(1.0) } } val x: Term[Number] = NumTerm(MyInt(5)) def main(args: Array[String]) { val cell = Cell[Number](MyInt(6)) Console.println(cell) f[Number](new NumTerm(MyInt(5)), cell) Console.println(cell) } }
loskutov/intellij-scala
testdata/scalacTests/failed/gadts2.scala
Scala
apache-2.0
580
package a4.shard.routing import java.lang.Exception class RoutingException extends Exception class RequestParsingException extends RoutingException class UnknownMethodException(val method: String) extends RequestParsingException class UnknownRouteException extends RoutingException
aggregat4/shard
src/main/scala/a4/shard/routing/RoutingException.scala
Scala
apache-2.0
296
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.streaming import java.io.File import java.sql.Timestamp import java.util.{Locale, UUID} import scala.util.Random import org.apache.commons.io.FileUtils import org.scalatest.BeforeAndAfter import org.apache.spark.scheduler.ExecutorCacheTaskLocation import org.apache.spark.sql.{DataFrame, Row, SparkSession} import org.apache.spark.sql.execution.exchange.ShuffleExchangeExec import org.apache.spark.sql.execution.streaming.{MemoryStream, StatefulOperatorStateInfo, StreamingSymmetricHashJoinExec, StreamingSymmetricHashJoinHelper} import org.apache.spark.sql.execution.streaming.state.{StateStore, StateStoreProviderId} import org.apache.spark.sql.functions._ import org.apache.spark.util.Utils abstract class StreamingJoinSuite extends StreamTest with StateStoreMetricsTest with BeforeAndAfter { import testImplicits._ before { SparkSession.setActiveSession(spark) // set this before force initializing 'joinExec' spark.streams.stateStoreCoordinator // initialize the lazy coordinator } after { StateStore.stop() } protected def setupStream(prefix: String, multiplier: Int): (MemoryStream[Int], DataFrame) = { val input = MemoryStream[Int] val df = input.toDF .select( 'value as "key", timestamp_seconds($"value") as s"${prefix}Time", ('value * multiplier) as s"${prefix}Value") .withWatermark(s"${prefix}Time", "10 seconds") (input, df) } protected def setupWindowedJoin(joinType: String) : (MemoryStream[Int], MemoryStream[Int], DataFrame) = { val (input1, df1) = setupStream("left", 2) val (input2, df2) = setupStream("right", 3) val windowed1 = df1.select('key, window('leftTime, "10 second"), 'leftValue) val windowed2 = df2.select('key, window('rightTime, "10 second"), 'rightValue) val joined = windowed1.join(windowed2, Seq("key", "window"), joinType) val select = if (joinType == "left_semi") { joined.select('key, $"window.end".cast("long"), 'leftValue) } else { joined.select('key, $"window.end".cast("long"), 'leftValue, 'rightValue) } (input1, input2, select) } protected def setupWindowedJoinWithLeftCondition(joinType: String) : (MemoryStream[Int], MemoryStream[Int], DataFrame) = { val (leftInput, df1) = setupStream("left", 2) val (rightInput, df2) = setupStream("right", 3) // Use different schemas to ensure the null row is being generated from the correct side. 
val left = df1.select('key, window('leftTime, "10 second"), 'leftValue) val right = df2.select('key, window('rightTime, "10 second"), 'rightValue.cast("string")) val joined = left.join( right, left("key") === right("key") && left("window") === right("window") && 'leftValue > 4, joinType) val select = if (joinType == "left_semi") { joined.select(left("key"), left("window.end").cast("long"), 'leftValue) } else if (joinType == "left_outer") { joined.select(left("key"), left("window.end").cast("long"), 'leftValue, 'rightValue) } else if (joinType == "right_outer") { joined.select(right("key"), right("window.end").cast("long"), 'leftValue, 'rightValue) } else { joined.select(left("key"), left("window.end").cast("long"), 'leftValue, right("key"), right("window.end").cast("long"), 'rightValue) } (leftInput, rightInput, select) } protected def setupWindowedJoinWithRightCondition(joinType: String) : (MemoryStream[Int], MemoryStream[Int], DataFrame) = { val (leftInput, df1) = setupStream("left", 2) val (rightInput, df2) = setupStream("right", 3) // Use different schemas to ensure the null row is being generated from the correct side. val left = df1.select('key, window('leftTime, "10 second"), 'leftValue) val right = df2.select('key, window('rightTime, "10 second"), 'rightValue.cast("string")) val joined = left.join( right, left("key") === right("key") && left("window") === right("window") && 'rightValue.cast("int") > 7, joinType) val select = if (joinType == "left_semi") { joined.select(left("key"), left("window.end").cast("long"), 'leftValue) } else if (joinType == "left_outer") { joined.select(left("key"), left("window.end").cast("long"), 'leftValue, 'rightValue) } else if (joinType == "right_outer") { joined.select(right("key"), right("window.end").cast("long"), 'leftValue, 'rightValue) } else { joined.select(left("key"), left("window.end").cast("long"), 'leftValue, right("key"), right("window.end").cast("long"), 'rightValue) } (leftInput, rightInput, select) } protected def setupJoinWithRangeCondition( joinType: String, watermark: String = "10 seconds", lowerBound: String = "interval 5 seconds", upperBound: String = "interval 5 seconds") : (MemoryStream[(Int, Int)], MemoryStream[(Int, Int)], DataFrame) = { val leftInput = MemoryStream[(Int, Int)] val rightInput = MemoryStream[(Int, Int)] val df1 = leftInput.toDF.toDF("leftKey", "time") .select('leftKey, timestamp_seconds($"time") as "leftTime", ('leftKey * 2) as "leftValue") .withWatermark("leftTime", watermark) val df2 = rightInput.toDF.toDF("rightKey", "time") .select('rightKey, timestamp_seconds($"time") as "rightTime", ('rightKey * 3) as "rightValue") .withWatermark("rightTime", watermark) val joined = df1.join( df2, expr("leftKey = rightKey AND " + s"leftTime BETWEEN rightTime - $lowerBound AND rightTime + $upperBound"), joinType) val select = if (joinType == "left_semi") { joined.select('leftKey, 'leftTime.cast("int")) } else { joined.select('leftKey, 'rightKey, 'leftTime.cast("int"), 'rightTime.cast("int")) } (leftInput, rightInput, select) } protected def setupSelfJoin(joinType: String) : (MemoryStream[(Int, Long)], DataFrame) = { val inputStream = MemoryStream[(Int, Long)] val df = inputStream.toDS() .select(col("_1").as("value"), timestamp_seconds($"_2").as("timestamp")) val leftStream = df.select(col("value").as("leftId"), col("timestamp").as("leftTime")) val rightStream = df // Introduce misses for ease of debugging .where(col("value") % 2 === 0) .select(col("value").as("rightId"), col("timestamp").as("rightTime")) val joined = 
leftStream .withWatermark("leftTime", "5 seconds") .join( rightStream.withWatermark("rightTime", "5 seconds"), expr("leftId = rightId AND rightTime >= leftTime AND " + "rightTime <= leftTime + interval 5 seconds"), joinType) val select = if (joinType == "left_semi") { joined.select(col("leftId"), col("leftTime").cast("int")) } else { joined.select(col("leftId"), col("leftTime").cast("int"), col("rightId"), col("rightTime").cast("int")) } (inputStream, select) } } class StreamingInnerJoinSuite extends StreamingJoinSuite { import testImplicits._ test("stream stream inner join on non-time column") { val input1 = MemoryStream[Int] val input2 = MemoryStream[Int] val df1 = input1.toDF.select('value as "key", ('value * 2) as "leftValue") val df2 = input2.toDF.select('value as "key", ('value * 3) as "rightValue") val joined = df1.join(df2, "key") testStream(joined)( AddData(input1, 1), CheckAnswer(), AddData(input2, 1, 10), // 1 arrived on input1 first, then input2, should join CheckNewAnswer((1, 2, 3)), AddData(input1, 10), // 10 arrived on input2 first, then input1, should join CheckNewAnswer((10, 20, 30)), AddData(input2, 1), // another 1 in input2 should join with 1 input1 CheckNewAnswer((1, 2, 3)), StopStream, StartStream(), AddData(input1, 1), // multiple 1s should be kept in state causing multiple (1, 2, 3) CheckNewAnswer((1, 2, 3), (1, 2, 3)), StopStream, StartStream(), AddData(input1, 100), AddData(input2, 100), CheckNewAnswer((100, 200, 300)) ) } test("stream stream inner join on windows - without watermark") { val input1 = MemoryStream[Int] val input2 = MemoryStream[Int] val df1 = input1.toDF .select('value as "key", timestamp_seconds($"value") as "timestamp", ('value * 2) as "leftValue") .select('key, window('timestamp, "10 second"), 'leftValue) val df2 = input2.toDF .select('value as "key", timestamp_seconds($"value") as "timestamp", ('value * 3) as "rightValue") .select('key, window('timestamp, "10 second"), 'rightValue) val joined = df1.join(df2, Seq("key", "window")) .select('key, $"window.end".cast("long"), 'leftValue, 'rightValue) testStream(joined)( AddData(input1, 1), CheckNewAnswer(), AddData(input2, 1), CheckNewAnswer((1, 10, 2, 3)), StopStream, StartStream(), AddData(input1, 25), CheckNewAnswer(), StopStream, StartStream(), AddData(input2, 25), CheckNewAnswer((25, 30, 50, 75)), AddData(input1, 1), CheckNewAnswer((1, 10, 2, 3)), // State for 1 still around as there is no watermark StopStream, StartStream(), AddData(input1, 5), CheckNewAnswer(), AddData(input2, 5), CheckNewAnswer((5, 10, 10, 15)) // No filter by any watermark ) } test("stream stream inner join on windows - with watermark") { val input1 = MemoryStream[Int] val input2 = MemoryStream[Int] val df1 = input1.toDF .select('value as "key", timestamp_seconds($"value") as "timestamp", ('value * 2) as "leftValue") .withWatermark("timestamp", "10 seconds") .select('key, window('timestamp, "10 second"), 'leftValue) val df2 = input2.toDF .select('value as "key", timestamp_seconds($"value") as "timestamp", ('value * 3) as "rightValue") .select('key, window('timestamp, "10 second"), 'rightValue) val joined = df1.join(df2, Seq("key", "window")) .select('key, $"window.end".cast("long"), 'leftValue, 'rightValue) testStream(joined)( AddData(input1, 1), CheckAnswer(), assertNumStateRows(total = 1, updated = 1), AddData(input2, 1), CheckAnswer((1, 10, 2, 3)), assertNumStateRows(total = 2, updated = 1), StopStream, StartStream(), AddData(input1, 25), CheckNewAnswer(), // watermark = 15, no-data-batch should remove 2 rows having 
window=[0,10] assertNumStateRows(total = 1, updated = 1), AddData(input2, 25), CheckNewAnswer((25, 30, 50, 75)), assertNumStateRows(total = 2, updated = 1), StopStream, StartStream(), AddData(input2, 1), CheckNewAnswer(), // Should not join as < 15 removed assertNumStateRows(total = 2, updated = 0), // row not add as 1 < state key watermark = 15 AddData(input1, 5), CheckNewAnswer(), // Same reason as above assertNumStateRows(total = 2, updated = 0, droppedByWatermark = 1) ) } test("stream stream inner join with time range - with watermark - one side condition") { import org.apache.spark.sql.functions._ val leftInput = MemoryStream[(Int, Int)] val rightInput = MemoryStream[(Int, Int)] val df1 = leftInput.toDF.toDF("leftKey", "time") .select('leftKey, timestamp_seconds($"time") as "leftTime", ('leftKey * 2) as "leftValue") .withWatermark("leftTime", "10 seconds") val df2 = rightInput.toDF.toDF("rightKey", "time") .select('rightKey, timestamp_seconds($"time") as "rightTime", ('rightKey * 3) as "rightValue") .withWatermark("rightTime", "10 seconds") val joined = df1.join(df2, expr("leftKey = rightKey AND leftTime < rightTime - interval 5 seconds")) .select('leftKey, 'leftTime.cast("int"), 'rightTime.cast("int")) testStream(joined)( AddData(leftInput, (1, 5)), CheckAnswer(), AddData(rightInput, (1, 11)), CheckNewAnswer((1, 5, 11)), AddData(rightInput, (1, 10)), CheckNewAnswer(), // no match as leftTime 5 is not < rightTime 10 - 5 assertNumStateRows(total = 3, updated = 3), // Increase event time watermark to 20s by adding data with time = 30s on both inputs AddData(leftInput, (1, 3), (1, 30)), CheckNewAnswer((1, 3, 10), (1, 3, 11)), assertNumStateRows(total = 5, updated = 2), AddData(rightInput, (0, 30)), CheckNewAnswer(), // event time watermark: max event time - 10 ==> 30 - 10 = 20 // so left side going to only receive data where leftTime > 20 // right side state constraint: 20 < leftTime < rightTime - 5 ==> rightTime > 25 // right state where rightTime <= 25 will be cleared, (1, 11) and (1, 10) removed assertNumStateRows(total = 4, updated = 1), // New data to right input should match with left side (1, 3) and (1, 5), as left state should // not be cleared. But rows rightTime <= 20 should be filtered due to event time watermark and // state rows with rightTime <= 25 should be removed from state. 
// (1, 20) ==> filtered by event time watermark = 20 // (1, 21) ==> passed filter, matched with left (1, 3) and (1, 5), not added to state // as 21 < state watermark = 25 // (1, 28) ==> passed filter, matched with left (1, 3) and (1, 5), added to state AddData(rightInput, (1, 20), (1, 21), (1, 28)), CheckNewAnswer((1, 3, 21), (1, 5, 21), (1, 3, 28), (1, 5, 28)), assertNumStateRows(total = 5, updated = 1, droppedByWatermark = 1), // New data to left input with leftTime <= 20 should be filtered due to event time watermark AddData(leftInput, (1, 20), (1, 21)), CheckNewAnswer((1, 21, 28)), assertNumStateRows(total = 6, updated = 1, droppedByWatermark = 1) ) } test("stream stream inner join with time range - with watermark - two side conditions") { import org.apache.spark.sql.functions._ val leftInput = MemoryStream[(Int, Int)] val rightInput = MemoryStream[(Int, Int)] val df1 = leftInput.toDF.toDF("leftKey", "time") .select('leftKey, timestamp_seconds($"time") as "leftTime", ('leftKey * 2) as "leftValue") .withWatermark("leftTime", "20 seconds") val df2 = rightInput.toDF.toDF("rightKey", "time") .select('rightKey, timestamp_seconds($"time") as "rightTime", ('rightKey * 3) as "rightValue") .withWatermark("rightTime", "30 seconds") val condition = expr( "leftKey = rightKey AND " + "leftTime BETWEEN rightTime - interval 10 seconds AND rightTime + interval 5 seconds") // This translates to leftTime <= rightTime + 5 seconds AND leftTime >= rightTime - 10 seconds // So given leftTime, rightTime has to be BETWEEN leftTime - 5 seconds AND leftTime + 10 seconds // // =============== * ======================== * ============================== * ==> leftTime // | | | // |<---- 5s -->|<------ 10s ------>| |<------ 10s ------>|<---- 5s -->| // | | | // == * ============================== * =========>============== * ===============> rightTime // // E.g. 
// if rightTime = 60, then it matches only leftTime = [50, 65] // if leftTime = 20, then it match only with rightTime = [15, 30] // // State value predicates // left side: // values allowed: leftTime >= rightTime - 10s ==> leftTime > eventTimeWatermark - 10 // drop state where leftTime < eventTime - 10 // right side: // values allowed: rightTime >= leftTime - 5s ==> rightTime > eventTimeWatermark - 5 // drop state where rightTime < eventTime - 5 val joined = df1.join(df2, condition).select('leftKey, 'leftTime.cast("int"), 'rightTime.cast("int")) testStream(joined)( // If leftTime = 20, then it match only with rightTime = [15, 30] AddData(leftInput, (1, 20)), CheckAnswer(), AddData(rightInput, (1, 14), (1, 15), (1, 25), (1, 26), (1, 30), (1, 31)), CheckNewAnswer((1, 20, 15), (1, 20, 25), (1, 20, 26), (1, 20, 30)), assertNumStateRows(total = 7, updated = 7), // If rightTime = 60, then it matches only leftTime = [50, 65] AddData(rightInput, (1, 60)), CheckNewAnswer(), // matches with nothing on the left AddData(leftInput, (1, 49), (1, 50), (1, 65), (1, 66)), CheckNewAnswer((1, 50, 60), (1, 65, 60)), // Event time watermark = min(left: 66 - delay 20 = 46, right: 60 - delay 30 = 30) = 30 // Left state value watermark = 30 - 10 = slightly less than 20 (since condition has <=) // Should drop < 20 from left, i.e., none // Right state value watermark = 30 - 5 = slightly less than 25 (since condition has <=) // Should drop < 25 from the right, i.e., 14 and 15 assertNumStateRows(total = 10, updated = 5), // 12 - 2 removed AddData(leftInput, (1, 30), (1, 31)), // 30 should not be processed or added to state CheckNewAnswer((1, 31, 26), (1, 31, 30), (1, 31, 31)), assertNumStateRows(total = 11, updated = 1, droppedByWatermark = 1), // only 31 added // Advance the watermark AddData(rightInput, (1, 80)), CheckNewAnswer(), // Event time watermark = min(left: 66 - delay 20 = 46, right: 80 - delay 30 = 50) = 46 // Left state value watermark = 46 - 10 = slightly less than 36 (since condition has <=) // Should drop < 36 from left, i.e., 20, 31 (30 was not added) // Right state value watermark = 46 - 5 = slightly less than 41 (since condition has <=) // Should drop < 41 from the right, i.e., 25, 26, 30, 31 assertNumStateRows(total = 6, updated = 1), // 12 - 6 removed AddData(rightInput, (1, 46), (1, 50)), // 46 should not be processed or added to state CheckNewAnswer((1, 49, 50), (1, 50, 50)), assertNumStateRows(total = 7, updated = 1, droppedByWatermark = 1) // 50 added ) } testQuietly("stream stream inner join without equality predicate") { val input1 = MemoryStream[Int] val input2 = MemoryStream[Int] val df1 = input1.toDF.select('value as "leftKey", ('value * 2) as "leftValue") val df2 = input2.toDF.select('value as "rightKey", ('value * 3) as "rightValue") val joined = df1.join(df2, expr("leftKey < rightKey")) val e = intercept[Exception] { val q = joined.writeStream.format("memory").queryName("test").start() input1.addData(1) q.awaitTermination(10000) } assert(e.toString.contains("Stream-stream join without equality predicate is not supported")) } test("stream stream self join") { val input = MemoryStream[Int] val df = input.toDF val join = df.select('value % 5 as "key", 'value).join( df.select('value % 5 as "key", 'value), "key") testStream(join)( AddData(input, 1, 2), CheckAnswer((1, 1, 1), (2, 2, 2)), StopStream, StartStream(), AddData(input, 3, 6), /* (1, 1) (1, 1) (2, 2) x (2, 2) = (1, 1, 1), (1, 1, 6), (2, 2, 2), (1, 6, 1), (1, 6, 6) (1, 6) (1, 6) */ CheckAnswer((3, 3, 3), (1, 1, 1), (1, 1, 6), (2, 2, 
2), (1, 6, 1), (1, 6, 6))) } test("locality preferences of StateStoreAwareZippedRDD") { import StreamingSymmetricHashJoinHelper._ withTempDir { tempDir => val queryId = UUID.randomUUID val opId = 0 val path = Utils.createDirectory(tempDir.getAbsolutePath, Random.nextFloat.toString).toString val stateInfo = StatefulOperatorStateInfo(path, queryId, opId, 0L, 5) implicit val sqlContext = spark.sqlContext val coordinatorRef = sqlContext.streams.stateStoreCoordinator val numPartitions = 5 val storeNames = Seq("name1", "name2") val partitionAndStoreNameToLocation = { for (partIndex <- 0 until numPartitions; storeName <- storeNames) yield { (partIndex, storeName) -> s"host-$partIndex-$storeName" } }.toMap partitionAndStoreNameToLocation.foreach { case ((partIndex, storeName), hostName) => val providerId = StateStoreProviderId(stateInfo, partIndex, storeName) coordinatorRef.reportActiveInstance(providerId, hostName, s"exec-$hostName", Seq.empty) require( coordinatorRef.getLocation(providerId) === Some(ExecutorCacheTaskLocation(hostName, s"exec-$hostName").toString)) } val rdd1 = spark.sparkContext.makeRDD(1 to 10, numPartitions) val rdd2 = spark.sparkContext.makeRDD((1 to 10).map(_.toString), numPartitions) val rdd = rdd1.stateStoreAwareZipPartitions(rdd2, stateInfo, storeNames, coordinatorRef) { (_, left, right) => left.zip(right) } require(rdd.partitions.length === numPartitions) for (partIndex <- 0 until numPartitions) { val expectedLocations = storeNames.map { storeName => val hostName = partitionAndStoreNameToLocation((partIndex, storeName)) ExecutorCacheTaskLocation(hostName, s"exec-$hostName").toString }.toSet assert(rdd.preferredLocations(rdd.partitions(partIndex)).toSet === expectedLocations) } } } test("join between three streams") { val input1 = MemoryStream[Int] val input2 = MemoryStream[Int] val input3 = MemoryStream[Int] val df1 = input1.toDF.select('value as "leftKey", ('value * 2) as "leftValue") val df2 = input2.toDF.select('value as "middleKey", ('value * 3) as "middleValue") val df3 = input3.toDF.select('value as "rightKey", ('value * 5) as "rightValue") val joined = df1.join(df2, expr("leftKey = middleKey")).join(df3, expr("rightKey = middleKey")) testStream(joined)( AddData(input1, 1, 5), AddData(input2, 1, 5, 10), AddData(input3, 5, 10), CheckNewAnswer((5, 10, 5, 15, 5, 25))) } test("streaming join should require HashClusteredDistribution from children") { val input1 = MemoryStream[Int] val input2 = MemoryStream[Int] val df1 = input1.toDF.select('value as 'a, 'value * 2 as 'b) val df2 = input2.toDF.select('value as 'a, 'value * 2 as 'b).repartition('b) val joined = df1.join(df2, Seq("a", "b")).select('a) testStream(joined)( AddData(input1, 1.to(1000): _*), AddData(input2, 1.to(1000): _*), CheckAnswer(1.to(1000): _*), Execute { query => // Verify the query plan assert(query.lastExecution.executedPlan.collect { case j @ StreamingSymmetricHashJoinExec(_, _, _, _, _, _, _, _, _: ShuffleExchangeExec, ShuffleExchangeExec(_, _: ShuffleExchangeExec, _)) => j }.size == 1) }) } test("SPARK-26187 restore the stream-stream inner join query from Spark 2.4") { val inputStream = MemoryStream[(Int, Long)] val df = inputStream.toDS() .select(col("_1").as("value"), timestamp_seconds($"_2").as("timestamp")) val leftStream = df.select(col("value").as("leftId"), col("timestamp").as("leftTime")) val rightStream = df // Introduce misses for ease of debugging .where(col("value") % 2 === 0) .select(col("value").as("rightId"), col("timestamp").as("rightTime")) val query = leftStream 
.withWatermark("leftTime", "5 seconds") .join( rightStream.withWatermark("rightTime", "5 seconds"), expr("rightId = leftId AND rightTime >= leftTime AND " + "rightTime <= leftTime + interval 5 seconds"), joinType = "inner") .select(col("leftId"), col("leftTime").cast("int"), col("rightId"), col("rightTime").cast("int")) val resourceUri = this.getClass.getResource( "/structured-streaming/checkpoint-version-2.4.0-streaming-join/").toURI val checkpointDir = Utils.createTempDir().getCanonicalFile // Copy the checkpoint to a temp dir to prevent changes to the original. // Not doing this will lead to the test passing on the first run, but fail subsequent runs. FileUtils.copyDirectory(new File(resourceUri), checkpointDir) inputStream.addData((1, 1L), (2, 2L), (3, 3L), (4, 4L), (5, 5L)) testStream(query)( StartStream(checkpointLocation = checkpointDir.getAbsolutePath), /* Note: The checkpoint was generated using the following input in Spark version 2.4.0 AddData(inputStream, (1, 1L), (2, 2L), (3, 3L), (4, 4L), (5, 5L)), // batch 1 - global watermark = 0 // states // left: (1, 1L), (2, 2L), (3, 3L), (4, 4L), (5, 5L) // right: (2, 2L), (4, 4L) CheckNewAnswer((2, 2L, 2, 2L), (4, 4L, 4, 4L)), assertNumStateRows(7, 7), */ AddData(inputStream, (6, 6L), (7, 7L), (8, 8L), (9, 9L), (10, 10L)), // batch 2: same result as above test CheckNewAnswer((6, 6L, 6, 6L), (8, 8L, 8, 8L), (10, 10L, 10, 10L)), assertNumStateRows(11, 6), Execute { query => // Verify state format = 1 val f = query.lastExecution.executedPlan.collect { case f: StreamingSymmetricHashJoinExec => f } assert(f.size == 1) assert(f.head.stateFormatVersion == 1) } ) } } class StreamingOuterJoinSuite extends StreamingJoinSuite { import testImplicits._ import org.apache.spark.sql.functions._ test("left outer early state exclusion on left") { val (leftInput, rightInput, joined) = setupWindowedJoinWithLeftCondition("left_outer") testStream(joined)( MultiAddData(leftInput, 1, 2, 3)(rightInput, 3, 4, 5), // The left rows with leftValue <= 4 should generate their outer join row now and // not get added to the state. CheckNewAnswer(Row(3, 10, 6, "9"), Row(1, 10, 2, null), Row(2, 10, 4, null)), assertNumStateRows(total = 4, updated = 4), // We shouldn't get more outer join rows when the watermark advances. MultiAddData(leftInput, 20)(rightInput, 21), CheckNewAnswer(), AddData(rightInput, 20), CheckNewAnswer((20, 30, 40, "60")) ) } test("left outer early state exclusion on right") { val (leftInput, rightInput, joined) = setupWindowedJoinWithRightCondition("left_outer") testStream(joined)( MultiAddData(leftInput, 3, 4, 5)(rightInput, 1, 2, 3), // The right rows with rightValue <= 7 should never be added to the state. CheckNewAnswer(Row(3, 10, 6, "9")), // rightValue = 9 > 7 hence joined and added to state assertNumStateRows(total = 4, updated = 4), // When the watermark advances, we get the outer join rows just as we would if they // were added but didn't match the full join condition. MultiAddData(leftInput, 20)(rightInput, 21), // watermark = 10, no-data-batch computes nulls CheckNewAnswer(Row(4, 10, 8, null), Row(5, 10, 10, null)), AddData(rightInput, 20), CheckNewAnswer(Row(20, 30, 40, "60")) ) } test("right outer early state exclusion on left") { val (leftInput, rightInput, joined) = setupWindowedJoinWithLeftCondition("right_outer") testStream(joined)( MultiAddData(leftInput, 1, 2, 3)(rightInput, 3, 4, 5), // The left rows with leftValue <= 4 should never be added to the state. 
CheckNewAnswer(Row(3, 10, 6, "9")), // leftValue = 7 > 4 hence joined and added to state assertNumStateRows(total = 4, updated = 4), // When the watermark advances, we get the outer join rows just as we would if they // were added but didn't match the full join condition. MultiAddData(leftInput, 20)(rightInput, 21), // watermark = 10, no-data-batch computes nulls CheckNewAnswer(Row(4, 10, null, "12"), Row(5, 10, null, "15")), AddData(rightInput, 20), CheckNewAnswer(Row(20, 30, 40, "60")) ) } test("right outer early state exclusion on right") { val (leftInput, rightInput, joined) = setupWindowedJoinWithRightCondition("right_outer") testStream(joined)( MultiAddData(leftInput, 3, 4, 5)(rightInput, 1, 2, 3), // The right rows with rightValue <= 7 should generate their outer join row now and // not get added to the state. CheckNewAnswer(Row(3, 10, 6, "9"), Row(1, 10, null, "3"), Row(2, 10, null, "6")), assertNumStateRows(total = 4, updated = 4), // We shouldn't get more outer join rows when the watermark advances. MultiAddData(leftInput, 20)(rightInput, 21), CheckNewAnswer(), AddData(rightInput, 20), CheckNewAnswer((20, 30, 40, "60")) ) } test("windowed left outer join") { val (leftInput, rightInput, joined) = setupWindowedJoin("left_outer") testStream(joined)( // Test inner part of the join. MultiAddData(leftInput, 1, 2, 3, 4, 5)(rightInput, 3, 4, 5, 6, 7), CheckNewAnswer((3, 10, 6, 9), (4, 10, 8, 12), (5, 10, 10, 15)), MultiAddData(leftInput, 21)(rightInput, 22), // watermark = 11, no-data-batch computes nulls CheckNewAnswer(Row(1, 10, 2, null), Row(2, 10, 4, null)), assertNumStateRows(total = 2, updated = 12), AddData(leftInput, 22), CheckNewAnswer(Row(22, 30, 44, 66)), assertNumStateRows(total = 3, updated = 1) ) } test("windowed right outer join") { val (leftInput, rightInput, joined) = setupWindowedJoin("right_outer") testStream(joined)( // Test inner part of the join. 
MultiAddData(leftInput, 1, 2, 3, 4, 5)(rightInput, 3, 4, 5, 6, 7), CheckNewAnswer((3, 10, 6, 9), (4, 10, 8, 12), (5, 10, 10, 15)), MultiAddData(leftInput, 21)(rightInput, 22), // watermark = 11, no-data-batch computes nulls CheckNewAnswer(Row(6, 10, null, 18), Row(7, 10, null, 21)), assertNumStateRows(total = 2, updated = 12), AddData(leftInput, 22), CheckNewAnswer(Row(22, 30, 44, 66)), assertNumStateRows(total = 3, updated = 1) ) } Seq( ("left_outer", Row(3, null, 5, null)), ("right_outer", Row(null, 2, null, 5)) ).foreach { case (joinType: String, outerResult) => test(s"${joinType.replaceAllLiterally("_", " ")} with watermark range condition") { val (leftInput, rightInput, joined) = setupJoinWithRangeCondition(joinType) testStream(joined)( AddData(leftInput, (1, 5), (3, 5)), CheckAnswer(), AddData(rightInput, (1, 10), (2, 5)), CheckNewAnswer((1, 1, 5, 10)), AddData(rightInput, (1, 11)), CheckNewAnswer(), // no match as left time is too low assertNumStateRows(total = 5, updated = 5), // Increase event time watermark to 20s by adding data with time = 30s on both inputs AddData(leftInput, (1, 7), (1, 30)), CheckNewAnswer((1, 1, 7, 10), (1, 1, 7, 11)), assertNumStateRows(total = 7, updated = 2), AddData(rightInput, (0, 30)), // watermark = 30 - 10 = 20, no-data-batch computes nulls CheckNewAnswer(outerResult), assertNumStateRows(total = 2, updated = 1) ) Seq( ("10 minutes", "interval 3 minutes 30 seconds"), ("10 minutes", "interval '3:30' minute to second")).foreach { case (watermark, bound) => val (leftInput2, rightInput2, joined2) = setupJoinWithRangeCondition( joinType, watermark, bound, bound) testStream(joined2)( AddData(leftInput2, (1, 210), (3, 5)), CheckAnswer(), AddData(rightInput2, (1, 300), (2, 5)), CheckNewAnswer((1, 1, 210, 300)), AddData(rightInput2, (1, 450)), CheckNewAnswer(), assertNumStateRows(total = 5, updated = 5), AddData(leftInput2, (1, 260), (1, 1800)), CheckNewAnswer((1, 1, 260, 300), (1, 1, 260, 450)), assertNumStateRows(total = 7, updated = 2), AddData(rightInput2, (0, 1800)), CheckNewAnswer(outerResult), assertNumStateRows(total = 2, updated = 1) ) } } } // When the join condition isn't true, the outer null rows must be generated, even if the join // keys themselves have a match. 
test("left outer join with non-key condition violated") { val (leftInput, simpleLeftDf) = setupStream("left", 2) val (rightInput, simpleRightDf) = setupStream("right", 3) val left = simpleLeftDf.select('key, window('leftTime, "10 second"), 'leftValue) val right = simpleRightDf.select('key, window('rightTime, "10 second"), 'rightValue) val joined = left.join( right, left("key") === right("key") && left("window") === right("window") && 'leftValue > 10 && ('rightValue < 300 || 'rightValue > 1000), "left_outer") .select(left("key"), left("window.end").cast("long"), 'leftValue, 'rightValue) testStream(joined)( // leftValue <= 10 should generate outer join rows even though it matches right keys MultiAddData(leftInput, 1, 2, 3)(rightInput, 1, 2, 3), CheckNewAnswer(Row(1, 10, 2, null), Row(2, 10, 4, null), Row(3, 10, 6, null)), assertNumStateRows(total = 3, updated = 3), // only right 1, 2, 3 added MultiAddData(leftInput, 20)(rightInput, 21), // watermark = 10, no-data-batch cleared < 10 CheckNewAnswer(), assertNumStateRows(total = 2, updated = 2), // only 20 and 21 left in state AddData(rightInput, 20), CheckNewAnswer(Row(20, 30, 40, 60)), assertNumStateRows(total = 3, updated = 1), // leftValue and rightValue both satisfying condition should not generate outer join rows MultiAddData(leftInput, 40, 41)(rightInput, 40, 41), // watermark = 31 CheckNewAnswer((40, 50, 80, 120), (41, 50, 82, 123)), assertNumStateRows(total = 4, updated = 4), // only left 40, 41 + right 40,41 left in state MultiAddData(leftInput, 70)(rightInput, 71), // watermark = 60 CheckNewAnswer(), assertNumStateRows(total = 2, updated = 2), // only 70, 71 left in state AddData(rightInput, 70), CheckNewAnswer((70, 80, 140, 210)), assertNumStateRows(total = 3, updated = 1), // rightValue between 300 and 1000 should generate outer join rows even though it matches left MultiAddData(leftInput, 101, 102, 103)(rightInput, 101, 102, 103), // watermark = 91 CheckNewAnswer(), assertNumStateRows(total = 6, updated = 3), // only 101 - 103 left in state MultiAddData(leftInput, 1000)(rightInput, 1001), CheckNewAnswer( Row(101, 110, 202, null), Row(102, 110, 204, null), Row(103, 110, 206, null)), assertNumStateRows(total = 2, updated = 2) ) } test("SPARK-26187 self left outer join should not return outer nulls for already matched rows") { val (inputStream, query) = setupSelfJoin("left_outer") testStream(query)( AddData(inputStream, (1, 1L), (2, 2L), (3, 3L), (4, 4L), (5, 5L)), // batch 1 - global watermark = 0 // states // left: (1, 1L), (2, 2L), (3, 3L), (4, 4L), (5, 5L) // right: (2, 2L), (4, 4L) CheckNewAnswer((2, 2L, 2, 2L), (4, 4L, 4, 4L)), assertNumStateRows(7, 7), AddData(inputStream, (6, 6L), (7, 7L), (8, 8L), (9, 9L), (10, 10L)), // batch 2 - global watermark = 5 // states // left: (1, 1L), (2, 2L), (3, 3L), (4, 4L), (5, 5L), (6, 6L), (7, 7L), (8, 8L), // (9, 9L), (10, 10L) // right: (6, 6L), (8, 8L), (10, 10L) // states evicted // left: nothing (it waits for 5 seconds more than watermark due to join condition) // right: (2, 2L), (4, 4L) // NOTE: look for evicted rows in right which are not evicted from left - they were // properly joined in batch 1 CheckNewAnswer((6, 6L, 6, 6L), (8, 8L, 8, 8L), (10, 10L, 10, 10L)), assertNumStateRows(13, 8), AddData(inputStream, (11, 11L), (12, 12L), (13, 13L), (14, 14L), (15, 15L)), // batch 3 // - global watermark = 9 <= min(9, 10) // states // left: (4, 4L), (5, 5L), (6, 6L), (7, 7L), (8, 8L), (9, 9L), (10, 10L), (11, 11L), // (12, 12L), (13, 13L), (14, 14L), (15, 15L) // right: (10, 10L), (12, 
12L), (14, 14L) // states evicted // left: (1, 1L), (2, 2L), (3, 3L) // right: (6, 6L), (8, 8L) CheckNewAnswer( Row(12, 12L, 12, 12L), Row(14, 14L, 14, 14L), Row(1, 1L, null, null), Row(3, 3L, null, null)), assertNumStateRows(15, 7) ) } test("SPARK-26187 self right outer join should not return outer nulls for already matched rows") { val inputStream = MemoryStream[(Int, Long)] val df = inputStream.toDS() .select(col("_1").as("value"), timestamp_seconds($"_2").as("timestamp")) // we're just flipping "left" and "right" from left outer join and apply right outer join val leftStream = df // Introduce misses for ease of debugging .where(col("value") % 2 === 0) .select(col("value").as("leftId"), col("timestamp").as("leftTime")) val rightStream = df.select(col("value").as("rightId"), col("timestamp").as("rightTime")) val query = leftStream .withWatermark("leftTime", "5 seconds") .join( rightStream.withWatermark("rightTime", "5 seconds"), expr("leftId = rightId AND leftTime >= rightTime AND " + "leftTime <= rightTime + interval 5 seconds"), joinType = "rightOuter") .select(col("leftId"), col("leftTime").cast("int"), col("rightId"), col("rightTime").cast("int")) // we can just flip left and right in the explanation of left outer query test // to assume the status of right outer query, hence skip explaining here testStream(query)( AddData(inputStream, (1, 1L), (2, 2L), (3, 3L), (4, 4L), (5, 5L)), CheckNewAnswer((2, 2L, 2, 2L), (4, 4L, 4, 4L)), assertNumStateRows(7, 7), AddData(inputStream, (6, 6L), (7, 7L), (8, 8L), (9, 9L), (10, 10L)), CheckNewAnswer((6, 6L, 6, 6L), (8, 8L, 8, 8L), (10, 10L, 10, 10L)), assertNumStateRows(13, 8), AddData(inputStream, (11, 11L), (12, 12L), (13, 13L), (14, 14L), (15, 15L)), CheckNewAnswer( Row(12, 12L, 12, 12L), Row(14, 14L, 14, 14L), Row(null, null, 1, 1L), Row(null, null, 3, 3L)), assertNumStateRows(15, 7) ) } test("SPARK-26187 restore the stream-stream outer join query from Spark 2.4") { val inputStream = MemoryStream[(Int, Long)] val df = inputStream.toDS() .select(col("_1").as("value"), timestamp_seconds($"_2").as("timestamp")) val leftStream = df.select(col("value").as("leftId"), col("timestamp").as("leftTime")) val rightStream = df // Introduce misses for ease of debugging .where(col("value") % 2 === 0) .select(col("value").as("rightId"), col("timestamp").as("rightTime")) val query = leftStream .withWatermark("leftTime", "5 seconds") .join( rightStream.withWatermark("rightTime", "5 seconds"), expr("rightId = leftId AND rightTime >= leftTime AND " + "rightTime <= leftTime + interval 5 seconds"), joinType = "leftOuter") .select(col("leftId"), col("leftTime").cast("int"), col("rightId"), col("rightTime").cast("int")) val resourceUri = this.getClass.getResource( "/structured-streaming/checkpoint-version-2.4.0-streaming-join/").toURI val checkpointDir = Utils.createTempDir().getCanonicalFile // Copy the checkpoint to a temp dir to prevent changes to the original. // Not doing this will lead to the test passing on the first run, but fail subsequent runs. 
FileUtils.copyDirectory(new File(resourceUri), checkpointDir) inputStream.addData((1, 1L), (2, 2L), (3, 3L), (4, 4L), (5, 5L)) /* Note: The checkpoint was generated using the following input in Spark version 2.4.0 AddData(inputStream, (1, 1L), (2, 2L), (3, 3L), (4, 4L), (5, 5L)), // batch 1 - global watermark = 0 // states // left: (1, 1L), (2, 2L), (3, 3L), (4, 4L), (5, 5L) // right: (2, 2L), (4, 4L) CheckNewAnswer((2, 2L, 2, 2L), (4, 4L, 4, 4L)), assertNumStateRows(7, 7), */ // we just fail the query if the checkpoint was create from less than Spark 3.0 val e = intercept[StreamingQueryException] { val writer = query.writeStream.format("console") .option("checkpointLocation", checkpointDir.getAbsolutePath).start() inputStream.addData((7, 7L), (8, 8L)) eventually(timeout(streamingTimeout)) { assert(writer.exception.isDefined) } throw writer.exception.get } assert(e.getMessage.toLowerCase(Locale.ROOT) .contains("the query is using stream-stream leftouter join with state format version 1")) } test("SPARK-29438: ensure UNION doesn't lead stream-stream join to use shifted partition IDs") { def constructUnionDf(desiredPartitionsForInput1: Int) : (MemoryStream[Int], MemoryStream[Int], MemoryStream[Int], DataFrame) = { val input1 = MemoryStream[Int](desiredPartitionsForInput1) val df1 = input1.toDF .select( 'value as "key", 'value as "leftValue", 'value as "rightValue") val (input2, df2) = setupStream("left", 2) val (input3, df3) = setupStream("right", 3) val joined = df2 .join(df3, df2("key") === df3("key") && df2("leftTime") === df3("rightTime"), "inner") .select(df2("key"), 'leftValue, 'rightValue) (input1, input2, input3, df1.union(joined)) } withTempDir { tempDir => val (input1, input2, input3, unionDf) = constructUnionDf(2) testStream(unionDf)( StartStream(checkpointLocation = tempDir.getAbsolutePath), MultiAddData( (input1, Seq(11, 12, 13)), (input2, Seq(11, 12, 13, 14, 15)), (input3, Seq(13, 14, 15, 16, 17))), CheckNewAnswer(Row(11, 11, 11), Row(12, 12, 12), Row(13, 13, 13), Row(13, 26, 39), Row(14, 28, 42), Row(15, 30, 45)), StopStream ) // We're restoring the query with different number of partitions in left side of UNION, // which leads right side of union to have mismatched partition IDs if it relies on // TaskContext.partitionId(). SPARK-29438 fixes this issue to not rely on it. 
val (newInput1, newInput2, newInput3, newUnionDf) = constructUnionDf(3) newInput1.addData(11, 12, 13) newInput2.addData(11, 12, 13, 14, 15) newInput3.addData(13, 14, 15, 16, 17) testStream(newUnionDf)( StartStream(checkpointLocation = tempDir.getAbsolutePath), MultiAddData( (newInput1, Seq(21, 22, 23)), (newInput2, Seq(21, 22, 23, 24, 25)), (newInput3, Seq(23, 24, 25, 26, 27))), CheckNewAnswer(Row(21, 21, 21), Row(22, 22, 22), Row(23, 23, 23), Row(23, 46, 69), Row(24, 48, 72), Row(25, 50, 75)) ) } } test("SPARK-32148 stream-stream join regression on Spark 3.0.0") { val input1 = MemoryStream[(Timestamp, String, String)] val df1 = input1.toDF .selectExpr("_1 as eventTime", "_2 as id", "_3 as comment") .withWatermark(s"eventTime", "2 minutes") val input2 = MemoryStream[(Timestamp, String, String)] val df2 = input2.toDF .selectExpr("_1 as eventTime", "_2 as id", "_3 as name") .withWatermark(s"eventTime", "4 minutes") val joined = df1.as("left") .join(df2.as("right"), expr(""" |left.id = right.id AND left.eventTime BETWEEN | right.eventTime - INTERVAL 30 seconds AND | right.eventTime + INTERVAL 30 seconds """.stripMargin), joinType = "leftOuter") val inputDataForInput1 = Seq( (Timestamp.valueOf("2020-01-01 00:00:00"), "abc", "has no join partner"), (Timestamp.valueOf("2020-01-02 00:00:00"), "abc", "joined with A"), (Timestamp.valueOf("2020-01-02 01:00:00"), "abc", "joined with B")) val inputDataForInput2 = Seq( (Timestamp.valueOf("2020-01-02 00:00:10"), "abc", "A"), (Timestamp.valueOf("2020-01-02 00:59:59"), "abc", "B"), (Timestamp.valueOf("2020-01-02 02:00:00"), "abc", "C")) val expectedOutput = Seq( (Timestamp.valueOf("2020-01-01 00:00:00"), "abc", "has no join partner", null, null, null), (Timestamp.valueOf("2020-01-02 00:00:00"), "abc", "joined with A", Timestamp.valueOf("2020-01-02 00:00:10"), "abc", "A"), (Timestamp.valueOf("2020-01-02 01:00:00"), "abc", "joined with B", Timestamp.valueOf("2020-01-02 00:59:59"), "abc", "B")) testStream(joined)( MultiAddData((input1, inputDataForInput1), (input2, inputDataForInput2)), CheckNewAnswer(expectedOutput.head, expectedOutput.tail: _*) ) } } class StreamingFullOuterJoinSuite extends StreamingJoinSuite { test("windowed full outer join") { val (leftInput, rightInput, joined) = setupWindowedJoin("full_outer") testStream(joined)( MultiAddData(leftInput, 1, 2, 3, 4, 5)(rightInput, 3, 4, 5, 6, 7), CheckNewAnswer(Row(3, 10, 6, 9), Row(4, 10, 8, 12), Row(5, 10, 10, 15)), // states // left: 1, 2, 3, 4 ,5 // right: 3, 4, 5, 6, 7 assertNumStateRows(total = 10, updated = 10), MultiAddData(leftInput, 21)(rightInput, 22), // Watermark = 11, should remove rows having window=[0,10]. CheckNewAnswer(Row(1, 10, 2, null), Row(2, 10, 4, null), Row(6, 10, null, 18), Row(7, 10, null, 21)), // states // left: 21 // right: 22 // // states evicted // left: 1, 2, 3, 4 ,5 (below watermark) // right: 3, 4, 5, 6, 7 (below watermark) assertNumStateRows(total = 2, updated = 2), AddData(leftInput, 22), CheckNewAnswer(Row(22, 30, 44, 66)), // states // left: 21, 22 // right: 22 assertNumStateRows(total = 3, updated = 1), StopStream, StartStream(), AddData(leftInput, 1), // Row not add as 1 < state key watermark = 12. CheckNewAnswer(), // states // left: 21, 22 // right: 22 assertNumStateRows(total = 3, updated = 0, droppedByWatermark = 1), AddData(rightInput, 5), // Row not add as 5 < state key watermark = 12. 
CheckNewAnswer(), // states // left: 21, 22 // right: 22 assertNumStateRows(total = 3, updated = 0, droppedByWatermark = 1) ) } test("full outer early state exclusion on left") { val (leftInput, rightInput, joined) = setupWindowedJoinWithLeftCondition("full_outer") testStream(joined)( MultiAddData(leftInput, 1, 2, 3)(rightInput, 3, 4, 5), // The left rows with leftValue <= 4 should generate their outer join rows now and // not get added to the state. CheckNewAnswer(Row(1, 10, 2, null, null, null), Row(2, 10, 4, null, null, null), Row(3, 10, 6, 3, 10, "9")), // states // left: 3 // right: 3, 4, 5 assertNumStateRows(total = 4, updated = 4), // Generate outer join result for all non-matched rows when the watermark advances. MultiAddData(leftInput, 20)(rightInput, 21), CheckNewAnswer(Row(null, null, null, 4, 10, "12"), Row(null, null, null, 5, 10, "15")), // states // left: 20 // right: 21 // // states evicted // left: 3 (below watermark) // right: 3, 4, 5 (below watermark) assertNumStateRows(total = 2, updated = 2), AddData(rightInput, 20), CheckNewAnswer(Row(20, 30, 40, 20, 30, "60")), // states // left: 20 // right: 21, 20 assertNumStateRows(total = 3, updated = 1) ) } test("full outer early state exclusion on right") { val (leftInput, rightInput, joined) = setupWindowedJoinWithRightCondition("full_outer") testStream(joined)( MultiAddData(leftInput, 3, 4, 5)(rightInput, 1, 2, 3), // The right rows with rightValue <= 7 should generate their outer join rows now, // and never be added to the state. // The right row with rightValue = 9 > 7, hence joined and added to state. CheckNewAnswer(Row(null, null, null, 1, 10, "3"), Row(null, null, null, 2, 10, "6"), Row(3, 10, 6, 3, 10, "9")), // states // left: 3, 4, 5 // right: 3 assertNumStateRows(total = 4, updated = 4), // Generate outer join result for all non-matched rows when the watermark advances. MultiAddData(leftInput, 20)(rightInput, 21), CheckNewAnswer(Row(4, 10, 8, null, null, null), Row(5, 10, 10, null, null, null)), // states // left: 20 // right: 21 // // states evicted // left: 3, 4, 5 (below watermark) // right: 3 (below watermark) assertNumStateRows(total = 2, updated = 2), AddData(rightInput, 20), CheckNewAnswer(Row(20, 30, 40, 20, 30, "60")), // states // left: 20 // right: 21, 20 assertNumStateRows(total = 3, updated = 1) ) } test("full outer join with watermark range condition") { val (leftInput, rightInput, joined) = setupJoinWithRangeCondition("full_outer") testStream(joined)( AddData(leftInput, (1, 5), (3, 5)), CheckNewAnswer(), // states // left: (1, 5), (3, 5) // right: nothing assertNumStateRows(total = 2, updated = 2), AddData(rightInput, (1, 10), (2, 5)), // Match left row in the state. CheckNewAnswer(Row(1, 1, 5, 10)), // states // left: (1, 5), (3, 5) // right: (1, 10), (2, 5) assertNumStateRows(total = 4, updated = 2), AddData(rightInput, (1, 9)), // Match left row in the state. CheckNewAnswer(Row(1, 1, 5, 9)), // states // left: (1, 5), (3, 5) // right: (1, 10), (2, 5), (1, 9) assertNumStateRows(total = 5, updated = 1), // Increase event time watermark to 20s by adding data with time = 30s on both inputs. AddData(leftInput, (1, 7), (1, 30)), CheckNewAnswer(Row(1, 1, 7, 9), Row(1, 1, 7, 10)), // states // left: (1, 5), (3, 5), (1, 7), (1, 30) // right: (1, 10), (2, 5), (1, 9) assertNumStateRows(total = 7, updated = 2), // Watermark = 30 - 10 = 20, no matched row. // Generate outer join result for all non-matched rows when the watermark advances. 
AddData(rightInput, (0, 30)), CheckNewAnswer(Row(3, null, 5, null), Row(null, 2, null, 5)), // states // left: (1, 30) // right: (0, 30) // // states evicted // left: (1, 5), (3, 5), (1, 5) (below watermark = 20) // right: (1, 10), (2, 5), (1, 9) (below watermark = 20) assertNumStateRows(total = 2, updated = 1) ) } test("self full outer join") { val (inputStream, query) = setupSelfJoin("full_outer") testStream(query)( AddData(inputStream, (1, 1L), (2, 2L), (3, 3L), (4, 4L), (5, 5L)), CheckNewAnswer(Row(2, 2L, 2, 2L), Row(4, 4L, 4, 4L)), // batch 1 - global watermark = 0 // states // left: (1, 1L), (2, 2L), (3, 3L), (4, 4L), (5, 5L) // right: (2, 2L), (4, 4L) assertNumStateRows(total = 7, updated = 7), AddData(inputStream, (6, 6L), (7, 7L), (8, 8L), (9, 9L), (10, 10L)), CheckNewAnswer(Row(6, 6L, 6, 6L), Row(8, 8L, 8, 8L), Row(10, 10L, 10, 10L)), // batch 2 - global watermark = 5 // states // left: (1, 1L), (2, 2L), (3, 3L), (4, 4L), (5, 5L), (6, 6L), (7, 7L), (8, 8L), // (9, 9L), (10, 10L) // right: (6, 6L), (8, 8L), (10, 10L) // // states evicted // left: nothing (it waits for 5 seconds more than watermark due to join condition) // right: (2, 2L), (4, 4L) assertNumStateRows(total = 13, updated = 8), AddData(inputStream, (11, 11L), (12, 12L), (13, 13L), (14, 14L), (15, 15L)), CheckNewAnswer(Row(12, 12L, 12, 12L), Row(14, 14L, 14, 14L), Row(1, 1L, null, null), Row(3, 3L, null, null)), // batch 3 - global watermark = 9 // states // left: (4, 4L), (5, 5L), (6, 6L), (7, 7L), (8, 8L), (9, 9L), (10, 10L), (11, 11L), // (12, 12L), (13, 13L), (14, 14L), (15, 15L) // right: (10, 10L), (12, 12L), (14, 14L) // // states evicted // left: (1, 1L), (2, 2L), (3, 3L) // right: (6, 6L), (8, 8L) assertNumStateRows(total = 15, updated = 7) ) } } class StreamingLeftSemiJoinSuite extends StreamingJoinSuite { import testImplicits._ test("windowed left semi join") { val (leftInput, rightInput, joined) = setupWindowedJoin("left_semi") testStream(joined)( MultiAddData(leftInput, 1, 2, 3, 4, 5)(rightInput, 3, 4, 5, 6, 7), CheckNewAnswer(Row(3, 10, 6), Row(4, 10, 8), Row(5, 10, 10)), // states // left: 1, 2, 3, 4 ,5 // right: 3, 4, 5, 6, 7 assertNumStateRows(total = 10, updated = 10), MultiAddData(leftInput, 21)(rightInput, 22), // Watermark = 11, should remove rows having window=[0,10]. CheckNewAnswer(), // states // left: 21 // right: 22 // // states evicted // left: 1, 2, 3, 4 ,5 (below watermark) // right: 3, 4, 5, 6, 7 (below watermark) assertNumStateRows(total = 2, updated = 2), AddData(leftInput, 22), CheckNewAnswer(Row(22, 30, 44)), // Unlike inner/outer joins, given left input row matches with right input row, // we don't buffer the matched left input row to the state store. // // states // left: 21 // right: 22 assertNumStateRows(total = 2, updated = 0), StopStream, StartStream(), AddData(leftInput, 1), // Row not add as 1 < state key watermark = 12. CheckNewAnswer(), // states // left: 21 // right: 22 assertNumStateRows(total = 2, updated = 0, droppedByWatermark = 1), AddData(rightInput, 5), // Row not add as 5 < state key watermark = 12. CheckNewAnswer(), // states // left: 21 // right: 22 assertNumStateRows(total = 2, updated = 0, droppedByWatermark = 1) ) } test("left semi early state exclusion on left") { val (leftInput, rightInput, joined) = setupWindowedJoinWithLeftCondition("left_semi") testStream(joined)( MultiAddData(leftInput, 1, 2, 3)(rightInput, 3, 4, 5), // The left rows with leftValue <= 4 should not generate their semi join rows and // not get added to the state. 
CheckNewAnswer(Row(3, 10, 6)), // states // left: 3 // right: 3, 4, 5 assertNumStateRows(total = 4, updated = 4), // We shouldn't get more semi join rows when the watermark advances. MultiAddData(leftInput, 20)(rightInput, 21), CheckNewAnswer(), // states // left: 20 // right: 21 // // states evicted // left: 3 (below watermark) // right: 3, 4, 5 (below watermark) assertNumStateRows(total = 2, updated = 2), AddData(rightInput, 20), CheckNewAnswer((20, 30, 40)), // states // left: 20 // right: 21, 20 assertNumStateRows(total = 3, updated = 1) ) } test("left semi early state exclusion on right") { val (leftInput, rightInput, joined) = setupWindowedJoinWithRightCondition("left_semi") testStream(joined)( MultiAddData(leftInput, 3, 4, 5)(rightInput, 1, 2, 3), // The right rows with rightValue <= 7 should never be added to the state. // The right row with rightValue = 9 > 7, hence joined and added to state. CheckNewAnswer(Row(3, 10, 6)), // states // left: 3, 4, 5 // right: 3 assertNumStateRows(total = 4, updated = 4), // We shouldn't get more semi join rows when the watermark advances. MultiAddData(leftInput, 20)(rightInput, 21), CheckNewAnswer(), // states // left: 20 // right: 21 // // states evicted // left: 3, 4, 5 (below watermark) // right: 3 (below watermark) assertNumStateRows(total = 2, updated = 2), AddData(rightInput, 20), CheckNewAnswer((20, 30, 40)), // states // left: 20 // right: 21, 20 assertNumStateRows(total = 3, updated = 1) ) } test("left semi join with watermark range condition") { val (leftInput, rightInput, joined) = setupJoinWithRangeCondition("left_semi") testStream(joined)( AddData(leftInput, (1, 5), (3, 5)), CheckNewAnswer(), // states // left: (1, 5), (3, 5) // right: nothing assertNumStateRows(total = 2, updated = 2), AddData(rightInput, (1, 10), (2, 5)), // Match left row in the state. CheckNewAnswer((1, 5)), // states // left: (1, 5), (3, 5) // right: (1, 10), (2, 5) assertNumStateRows(total = 4, updated = 2), AddData(rightInput, (1, 9)), // No match as left row is already matched. CheckNewAnswer(), // states // left: (1, 5), (3, 5) // right: (1, 10), (2, 5), (1, 9) assertNumStateRows(total = 5, updated = 1), // Increase event time watermark to 20s by adding data with time = 30s on both inputs. AddData(leftInput, (1, 7), (1, 30)), CheckNewAnswer((1, 7)), // states // left: (1, 5), (3, 5), (1, 30) // right: (1, 10), (2, 5), (1, 9) assertNumStateRows(total = 6, updated = 1), // Watermark = 30 - 10 = 20, no matched row. 
AddData(rightInput, (0, 30)), CheckNewAnswer(), // states // left: (1, 30) // right: (0, 30) // // states evicted // left: (1, 5), (3, 5) (below watermark = 20) // right: (1, 10), (2, 5), (1, 9) (below watermark = 20) assertNumStateRows(total = 2, updated = 1) ) } test("self left semi join") { val (inputStream, query) = setupSelfJoin("left_semi") testStream(query)( AddData(inputStream, (1, 1L), (2, 2L), (3, 3L), (4, 4L), (5, 5L)), CheckNewAnswer((2, 2), (4, 4)), // batch 1 - global watermark = 0 // states // left: (2, 2L), (4, 4L) // (left rows with value % 2 != 0 is filtered per [[PushPredicateThroughJoin]]) // right: (2, 2L), (4, 4L) // (right rows with value % 2 != 0 is filtered per [[PushPredicateThroughJoin]]) assertNumStateRows(total = 4, updated = 4), AddData(inputStream, (6, 6L), (7, 7L), (8, 8L), (9, 9L), (10, 10L)), CheckNewAnswer((6, 6), (8, 8), (10, 10)), // batch 2 - global watermark = 5 // states // left: (2, 2L), (4, 4L), (6, 6L), (8, 8L), (10, 10L) // right: (6, 6L), (8, 8L), (10, 10L) // // states evicted // left: nothing (it waits for 5 seconds more than watermark due to join condition) // right: (2, 2L), (4, 4L) assertNumStateRows(total = 8, updated = 6), AddData(inputStream, (11, 11L), (12, 12L), (13, 13L), (14, 14L), (15, 15L)), CheckNewAnswer((12, 12), (14, 14)), // batch 3 - global watermark = 9 // states // left: (4, 4L), (6, 6L), (8, 8L), (10, 10L), (12, 12L), (14, 14L) // right: (10, 10L), (12, 12L), (14, 14L) // // states evicted // left: (2, 2L) // right: (6, 6L), (8, 8L) assertNumStateRows(total = 9, updated = 4) ) } }
maropu/spark
sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingJoinSuite.scala
Scala
apache-2.0
60,323
package io.getquill import com.github.mauricio.async.db.{ QueryResult => DBQueryResult } import com.github.mauricio.async.db.pool.PartitionedConnectionPool import com.github.mauricio.async.db.postgresql.PostgreSQLConnection import com.typesafe.config.Config import io.getquill.context.async.{ ArrayDecoders, ArrayEncoders, AsyncContext, UUIDObjectEncoding } import io.getquill.util.LoadConfig import io.getquill.util.Messages.fail class PostgresAsyncContext[N <: NamingStrategy](naming: N, pool: PartitionedConnectionPool[PostgreSQLConnection]) extends AsyncContext(PostgresDialect, naming, pool) with ArrayEncoders with ArrayDecoders with UUIDObjectEncoding { def this(naming: N, config: PostgresAsyncContextConfig) = this(naming, config.pool) def this(naming: N, config: Config) = this(naming, PostgresAsyncContextConfig(config)) def this(naming: N, configPrefix: String) = this(naming, LoadConfig(configPrefix)) override protected def extractActionResult[O](returningColumn: String, returningExtractor: Extractor[O])(result: DBQueryResult): O = { result.rows match { case Some(r) if r.nonEmpty => returningExtractor(r.head) case _ => fail("This is a bug. Cannot extract returning value.") } } override protected def expandAction(sql: String, returningColumn: String): String = s"$sql RETURNING $returningColumn" }
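
// Hypothetical usage sketch (not part of this file): building the context from a config
// prefix via the auxiliary constructor above. The prefix name "asyncPostgres" and the
// SnakeCase naming strategy are illustrative assumptions, not values used by the library.
object PostgresAsyncContextExample {
  lazy val ctx = new PostgresAsyncContext(SnakeCase, "asyncPostgres")
  // importing ctx._ would then bring quote/run into scope for queries against this pool
}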
mentegy/quill
quill-async-postgres/src/main/scala/io/getquill/PostgresAsyncContext.scala
Scala
apache-2.0
1,382
import org.scalatest.{FunSuite, Matchers} class ChangeTest extends FunSuite with Matchers { test("single coin change") { Change.findFewestCoins(25, List(1, 5, 10, 25, 100)) should be (Some(List(25))) } test("multiple coin change") { Change.findFewestCoins(15, List(1, 5, 10, 25, 100)) should be (Some(List(5, 10))) } test("change with Lilliputian Coins") { Change.findFewestCoins(23, List(1, 4, 15, 20, 50)) should be (Some(List(4, 4, 15))) } test("change with Lower Elbonia Coins") { Change.findFewestCoins(63, List(1, 5, 10, 21, 25)) should be (Some(List(21, 21, 21))) } test("large target values") { Change.findFewestCoins(999, List(1, 2, 5, 10, 20, 50, 100)) should be (Some(List(2, 2, 5, 20, 20, 50, 100, 100, 100, 100, 100, 100, 100, 100, 100))) } test("no coins make 0 change") { Change.findFewestCoins(0, List(1, 5, 10, 21, 25)) should be (Some(List())) } test("error testing for change smaller than the smallest of coins") { Change.findFewestCoins(3, List(5, 10)) should be (None) } test("cannot find negative change values") { Change.findFewestCoins(-5, List(1, 2, 5)) should be (None) } }
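
// Hypothetical reference sketch (the exercise's actual Change object is not shown here):
// a bottom-up dynamic programme that the tests above could exercise. The object and method
// names mirror the test expectations; everything else is an illustrative assumption.
object ChangeSketch {
  def findFewestCoins(target: Int, coins: List[Int]): Option[List[Int]] = {
    if (target < 0) None
    else {
      // best(i) holds a fewest-coin combination summing to i, if one exists
      val best = Array.fill[Option[List[Int]]](target + 1)(None)
      best(0) = Some(Nil)
      for (amount <- 1 to target; coin <- coins if coin <= amount) {
        best(amount - coin).foreach { prev =>
          val candidate = coin :: prev
          if (best(amount).forall(_.size > candidate.size)) best(amount) = Some(candidate)
        }
      }
      best(target).map(_.sorted)
    }
  }
}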
daewon/til
exercism/scala/change/src/test/scala/ChangeTest.scala
Scala
mpl-2.0
1,178
package com.sksamuel.elastic4s trait HitReader[T] { def read(hit: Hit): Either[Throwable, T] } trait AggReader[T] { def read(json: String): Either[Throwable, T] }
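
// Hypothetical sketch (not part of elastic4s): a hand-written HitReader instance for a
// small case class. It assumes Hit exposes a `sourceAsMap: Map[String, AnyRef]` accessor
// and that documents carry "name" and "year" fields; both are illustrative assumptions.
case class Album(name: String, year: Int)

object AlbumHitReader extends HitReader[Album] {
  override def read(hit: Hit): Either[Throwable, Album] =
    try {
      val source = hit.sourceAsMap
      Right(Album(source("name").toString, source("year").toString.toInt))
    } catch {
      case t: Throwable => Left(t)
    }
}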
Tecsisa/elastic4s
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/HitReader.scala
Scala
apache-2.0
169
def go(x: Int): Unit =
    go // error
    go // error
    go // error

def foo: Unit = (x: Int) => go(x) // warning
dotty-staging/dotty
tests/neg/i11761.scala
Scala
apache-2.0
117
package app.utils import slick.driver.PostgresDriver.simple._ import scala.slick.jdbc.meta.MTable import app.models.TaskDAO import app.{ Configs => C } trait PostgresSupport { def db = Database.forURL( url = s"jdbc:postgresql://${C.pgHost}:${C.pgPort}/${C.pgDBName}", driver = C.pgDriver ) implicit val session: Session = db.createSession() def startPostgres() = { if (MTable.getTables("tasks").list.isEmpty) { TaskDAO.createTable } } }
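
// Hypothetical wiring sketch: mixing the trait into an entry point so the schema check
// runs at startup. It assumes the database configured in app.Configs is reachable.
object PostgresBootstrap extends App with PostgresSupport {
  startPostgres()
}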
cfmcgrady/spray-akka-slick-postgres
src/main/scala/app/utils/PostgresSupport.scala
Scala
mit
476
/* * Copyright 2014-2020 Rik van der Kleij * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package intellij.haskell.editor import java.util import com.intellij.codeInsight.editorActions.ExtendWordSelectionHandler import com.intellij.openapi.editor.Editor import com.intellij.openapi.util.TextRange import com.intellij.psi.tree.IElementType import com.intellij.psi.{PsiComment, PsiElement, PsiWhiteSpace} import intellij.haskell.HaskellFile import intellij.haskell.psi.HaskellPsiUtil import intellij.haskell.psi.HaskellTypes._ import scala.collection.mutable.ListBuffer import scala.jdk.CollectionConverters._ class HaskellExtendWordSelectionHandler extends ExtendWordSelectionHandler { override def canSelect(e: PsiElement): Boolean = { Option(e.getContainingFile).exists(_.isInstanceOf[HaskellFile]) && !e.isInstanceOf[PsiComment] && !e.isInstanceOf[PsiWhiteSpace] } override def select(e: PsiElement, editorText: CharSequence, cursorOffset: Int, editor: Editor): util.List[TextRange] = { val startOffset = e.getTextRange.getStartOffset val nextEndOffsets = getOffsets(Some(e), ListBuffer.empty[(Int, IElementType)], (e: PsiElement) => e.getNextSibling, (e: PsiElement) => (e.getTextRange.getEndOffset, e.getNode.getElementType)) val prevStartOffsets = getOffsets(Option(e.getPrevSibling), ListBuffer.empty[(Int, IElementType)], (e: PsiElement) => e.getPrevSibling, (e: PsiElement) => (e.getTextRange.getStartOffset, e.getNode.getElementType)) val lastEndOffset = nextEndOffsets.lastOption.getOrElse((e.getTextRange.getEndOffset, e.getNode.getElementType)) val allSelectOptions = nextEndOffsets.map(eo => (e.getNode.getElementType, eo._2, new TextRange(startOffset, eo._1))) ++ prevStartOffsets.map(so => (so._2, lastEndOffset._2, new TextRange(so._1, lastEndOffset._1))) allSelectOptions.filter(x => if (x._1 == HS_LEFT_PAREN) { x._2 == HS_RIGHT_PAREN } else if (x._1 == HS_LEFT_BRACE) { x._2 == HS_RIGHT_BRACE } else if (x._1 == HS_LEFT_BRACKET) { x._2 == HS_RIGHT_BRACKET } else if (x._2 == HS_RIGHT_PAREN) { x._1 == HS_LEFT_PAREN } else if (x._2 == HS_RIGHT_BRACE) { x._1 == HS_LEFT_BRACE } else if (x._2 == HS_RIGHT_BRACKET) { x._1 == HS_LEFT_BRACKET } else { true } ).map(_._3).asJava } private def getOffsets(element: Option[PsiElement], offsets: ListBuffer[(Int, IElementType)], getSibling: PsiElement => PsiElement, getOffset: PsiElement => (Int, IElementType)): ListBuffer[(Int, IElementType)] = { def recur(e: PsiElement): ListBuffer[(Int, IElementType)] = { getOffsets(Option(getSibling(e)), offsets, getSibling, getOffset) } element match { case Some(e) => HaskellPsiUtil.findQualifiedName(e) match { case None => e match { case e: PsiWhiteSpace => recur(e) case e: PsiElement if e.getNode.getElementType == HS_COMMA => recur(e) case e: PsiElement if e.getNode.getElementType == HS_NEWLINE | e.getNode.getElementType == HS_EQUAL | e.getNode.getElementType == HS_LEFT_ARROW => offsets case _ => offsets += getOffset(e) recur(e) } case Some(qe) => offsets += getOffset(qe) recur(qe) } case None => offsets } } }
rikvdkleij/intellij-haskell
src/main/scala/intellij/haskell/editor/HaskellExtendWordSelectionHandler.scala
Scala
apache-2.0
3,875
package com.campudus.vertx.database import com.campudus.vertx.Verticle import com.github.mauricio.async.db.postgresql.util.URLParser import com.github.mauricio.async.db.Configuration import com.github.mauricio.async.db.postgresql.pool.PostgreSQLConnectionFactory import com.github.mauricio.async.db.pool.ConnectionPool import com.github.mauricio.async.db.pool.PoolConfiguration import com.campudus.vertx.VertxExecutionContext import com.campudus.vertx.database.pool.AsyncConnectionPool import org.vertx.scala.core.eventbus.EventBus._ import org.vertx.scala.core.json._ class Starter extends Verticle { var handler: ConnectionHandler = null override def start(startedResult: org.vertx.scala.core.Future[Void]) = { logger.error("Starting async database module for MySQL and PostgreSQL.") try { val config = Option(container.config()).getOrElse(Json.emptyObj()) val address = config.getString("address", "campudus.asyncdb") val dbType = getDatabaseType(config) val configuration = getConfiguration(config, dbType) handler = dbType match { case "postgresql" => new PostgreSqlConnectionHandler(this, configuration) case "mysql" => new MySqlConnectionHandler(this, configuration) } vertx.eventBus.registerHandler(address)(handler) logger.error("Async database module for MySQL and PostgreSQL started with config " + configuration) startedResult.setResult(null) } catch { case ex: Throwable => logger.fatal("could not start async database module!", ex) startedResult.setFailure(ex) } } override def stop() { Option(handler).map(_.close) } private def getDatabaseType(config: JsonObject) = { config.getString("connection", "postgresql").toLowerCase match { case "postgresql" => "postgresql" case "mysql" => "mysql" case x => throw new IllegalArgumentException("unknown connection type " + x) } } private def getConfiguration(config: JsonObject, dbType: String) = { val host = config.getString("host", "localhost") val port = config.getInteger("port", defaultPortFor(dbType)) val username = config.getString("username", defaultUserFor(dbType)) val password = Option(config.getString("password")).orElse(defaultPasswordFor(dbType)) val database = Option(config.getString("database")).orElse(defaultDatabaseFor(dbType)) Configuration(username, host, port, password, database) } private def defaultPortFor(connection: String): Integer = connection match { case "postgresql" => 5432 case "mysql" => 3306 } private def defaultDatabaseFor(connection: String): Option[String] = connection match { case _ => Some("testdb") } private def defaultUserFor(connection: String): String = connection match { case "postgresql" => "vertx" case "mysql" => "root" } private def defaultPasswordFor(connection: String): Option[String] = connection match { case "postgresql" => Some("test") case "mysql" => None } }
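
// Hypothetical deployment note: the verticle expects a JSON config shaped like the one
// below (keys taken from getConfiguration above; the values shown are just the defaults).
// {
//   "address": "campudus.asyncdb",
//   "connection": "postgresql",
//   "host": "localhost",
//   "port": 5432,
//   "username": "vertx",
//   "password": "test",
//   "database": "testdb"
// }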
campudus/vertx-mysql-postgresql
src/main/scala/com/campudus/vertx/database/Starter.scala
Scala
apache-2.0
3,026
/* * Copyright 2011 Typesafe Inc. * * This work is based on the original contribution of WeigleWilczek. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.typesafe.sbteclipse.core private object EclipseOpts { val ExecutionEnvironment = "execution-environment" val SkipParents = "skip-parents" val WithSource = "with-source" val WithJavadoc = "with-javadoc" val UseProjectId = "use-project-id" val WithBundledScalaContainers = "with-bundled-scala-containers" }
typesafehub/sbteclipse
src/main/scala/com/typesafe/sbteclipse/core/EclipseOpts.scala
Scala
apache-2.0
1,005
package com.twitter.zipkin.common.mustache import com.github.mustachejava.DefaultMustacheFactory import com.twitter.mustache.ScalaObjectHandler import java.io._ import collection.JavaConversions.mapAsJavaMap class ZipkinMustache(templateRoot: String, cache: Boolean) { import java.io.Reader class ZipkinMustacheFactory extends DefaultMustacheFactory() { override def getReader(rn: String): Reader = { // hack to get partials to work properly val name = if (rn.startsWith("public")) rn else "templates/" + rn if (cache) super.getReader(name) else { val file = new File(templateRoot, name) new BufferedReader(new InputStreamReader(new FileInputStream(file), "UTF8")) } } def invalidateCaches() { mustacheCache.invalidateAll() templateCache.invalidateAll() } } private[this] val mf = new ZipkinMustacheFactory //TODO: why isn't the scala handler coercing maps properly? mf.setObjectHandler(new ScalaObjectHandler) def render(template: String, data: Map[String, Object]): String = { if (!cache) mf.invalidateCaches() val mustache = mf.compile(template) val output = new StringWriter mustache.execute(output, mapAsJavaMap(data)).flush() output.toString } }
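
// Hypothetical usage sketch: rendering a template through the class above. The template
// root, template name and data keys are illustrative assumptions, not real project paths.
object ZipkinMustacheExample {
  def main(args: Array[String]): Unit = {
    val mustache = new ZipkinMustache("zipkin-web/src/main/resources", cache = false)
    val html = mustache.render("index.mustache", Map("pageTitle" -> "traces"))
    println(html)
  }
}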
wyzssw/zipkin
zipkin-web/src/main/scala/com/twitter/zipkin/common/mustache/ZipkinMustache.scala
Scala
apache-2.0
1,262
package com.github.scalaz_examples.equal

import scalaz._
import Scalaz._

object WhyShouldICare extends App {
  // In normal Java, == is only a reference comparison, so you must always use an 'equals' method
  // In normal Scala, == is a function that will mostly just call equals
  // what is the issue with equals? def equals(obj: AnyRef): Boolean // IT TAKES EVERYTHING!
  val string = "Hello World"
  val number = 1234
  assert(string != number, "The values are diff, why did they match?")
  // what's the problem? It gave the right answer!
  // well, the == would never work here since the types are not similar, so why can't the compiler tell me?
}

object BasicExample extends App {
  // how can we get the above to fail at compile time?
  val number = 1234
  val revNumber = 4321
  assert(number =/= revNumber, "The numbers are diff, but they matched?")
  assert(number === number, "Should have matched")

  // ok, so what's different so far?
  val string = "1234"
  // assert(number === string, "Shouldn't have compiled!")
  // GREAT! This check didn't make sense, so now the compiler catches it!
}

object WhatIfIWantFuzzyMatching extends App {
  // name must be intInstance to override existing intInstance implicit
  implicit object intInstance extends Equal[Int] {
    override def equal(left: Int, right: Int): Boolean = {
      val leftMod = left % 2
      val rightMod = right % 2
      leftMod == rightMod
    }
  }

  val even = 2
  val odd = 3
  assert(even =/= odd, "Shouldn't have matched!")

  val evenMultTwo = even * 2
  assert(even === evenMultTwo, "Both are even, so should have matched")
}

object WhatIfIWantToSwitchBack extends App {
  // so what if I want to switch back to the other Equals?
  object modEqualsInt extends Equal[Int] {
    override def equal(left: Int, right: Int): Boolean = {
      val leftMod = left % 2
      val rightMod = right % 2
      leftMod == rightMod
    }
  }

  implicit var intInstance: Equal[Int] = Scalaz.intInstance
  assert(2 =/= 4)

  intInstance = modEqualsInt
  assert(2 === 4)

  intInstance = Scalaz.intInstance
  assert(2 =/= 4)
}
dcapwell/scalaz-examples
src/main/scala/com/github/scalaz_examples/equal/Example.scala
Scala
mit
2,110
import sbt._ import Keys._ import play.Project._ import com.github.play2war.plugin._ object ApplicationBuild extends Build { val appName = "hed-editor-services" val appVersion = "1.0" val appDependencies = Seq( // Add your project dependencies here, "org.avaje.ebeanorm" % "avaje-ebeanorm-api" % "3.1.1", //"xml-apis" % "xml-apis" % "1.4.01" force(), "xerces" % "xercesImpl" % "2.10.0" force(), "com.hermit-reasoner" % "org.semanticweb.hermit" % "1.3.8.2" force(), "org.apache.jena" % "jena-core" % "2.11.0" force(), "com.hp.hpl.jena" % "arq" % "2.8.5" force(), "org.apache.stanbol" % "org.apache.stanbol.client" % "0.20.0-SHARP" force() exclude("org.slf4j","slf4j-log4j12"), javaCore, javaEbean, "com.clarkparsia.empire" % "empire" % "0.8.4" force() exclude("xerces", "xercesImpl") exclude("xml-apis", "xml-apis"), "org.drools" % "drools-core" % "5.6.0.Final" force() exclude("xerces", "xercesImpl") exclude("xml-apis", "xml-apis"), "org.drools" % "drools-compiler" % "5.6.0.Final" force() exclude("xerces", "xercesImpl") exclude("xml-apis", "xml-apis"), "org.drools" % "drools-shapes-utilities" % "0.5.6.Final" exclude("xerces", "xercesImpl") exclude("xml-apis", "xml-apis") exclude("com.hp.hpl.jena", "jena") exclude("com.hp.hpl.jena", "arq") exclude("thewebsemantic","jenabean") exclude("com.clarkparsia","Empire-core"), "org.drools" % "drools-shapes-generator" % "0.5.6.Final" exclude("xerces", "xercesImpl") exclude("xml-apis", "xml-apis") exclude("thewebsemantic","jenabean") exclude("com.clarkparsia","Empire-core"), // "com.google.gdata" % "core" % "1.47.1" exclude("xml-apis","xml-apis"), "edu.mayo.cts2.framework" % "model" % "0.8.4" withSources() withJavadoc() exclude("org.slf4j","slf4j-log4j12"), "edu.mayo.cts2.framework" % "core" % "0.8.4" withSources() withJavadoc() exclude("org.slf4j","slf4j-log4j12"), "sharpc2b" % "sharp-editor" % "0.1-SNAPSHOT" exclude("xerces", "xercesImpl") exclude("xml-apis", "xml-apis") exclude( "org.apache.maven.doxia", "doxia-site-renderer" ), "sharpc2b" % "import-export" % "0.1-SNAPSHOT" exclude("xerces", "xercesImpl") exclude("xml-apis", "xml-apis") exclude( "org.apache.maven.doxia", "doxia-site-renderer" ), "sharpc2b" % "editor-core" % "0.1-SNAPSHOT" exclude("xerces", "xercesImpl") exclude("xml-apis", "xml-apis") exclude( "org.apache.maven.doxia", "doxia-site-renderer" ) // , // Note at website (http://code.google.com/p/rest-assured/wiki/GettingStarted) // says put restassured before JUnit to ensure right version of hamcrest // // "com.jayway.restassured" % "rest-assured" % "1.8.1", // javaJdbc, // "mysql" % "mysql-connector-java" % "5.1.21", // jdbc // anorm ) // val libraryDependencies = Seq( // // ) val main = play.Project(appName, appVersion, appDependencies).settings( // Add your own project settings here testOptions in Test += Tests.Argument("junitxml", "console") // testOptions += Tests.Argument(TestFrameworks.JUnit, "-a", "-s") , resolvers += "informatics-releases" at "http://informatics.mayo.edu/maven/content/repositories/releases/", resolvers += "informatics-snapshots" at "http://informatics.mayo.edu/maven/content/repositories/snapshots/" , resolvers += "atlassian groups" at "https://maven.atlassian.com/content/groups/public/" , // resolvers += "atlassian" at "https://maven.atlassian.com/content/repositories/atlassian-public/" resolvers += "Local Maven Repository" at "file://"+Path.userHome.absolutePath+"/.m2/repository" ) // Resolvers. .settings(Play2WarPlugin.play2WarSettings: _*) .settings(Play2WarKeys.servletVersion := "3.0") }
sotty/sharp-editor
hed-services/project/Build.scala
Scala
apache-2.0
3,760
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package spark.io import java.io.{InputStream, OutputStream} import com.ning.compress.lzf.{LZFInputStream, LZFOutputStream} import org.xerial.snappy.{SnappyInputStream, SnappyOutputStream} /** * CompressionCodec allows the customization of choosing different compression implementations * to be used in block storage. */ trait CompressionCodec { def compressedOutputStream(s: OutputStream): OutputStream def compressedInputStream(s: InputStream): InputStream } private[spark] object CompressionCodec { def createCodec(): CompressionCodec = { // Set the default codec to Snappy since the LZF implementation initializes a pretty large // buffer for every stream, which results in a lot of memory overhead when the number of // shuffle reduce buckets are large. createCodec(classOf[SnappyCompressionCodec].getName) } def createCodec(codecName: String): CompressionCodec = { Class.forName( System.getProperty("spark.io.compression.codec", codecName), true, Thread.currentThread.getContextClassLoader).newInstance().asInstanceOf[CompressionCodec] } } /** * LZF implementation of [[spark.io.CompressionCodec]]. */ class LZFCompressionCodec extends CompressionCodec { override def compressedOutputStream(s: OutputStream): OutputStream = { new LZFOutputStream(s).setFinishBlockOnFlush(true) } override def compressedInputStream(s: InputStream): InputStream = new LZFInputStream(s) } /** * Snappy implementation of [[spark.io.CompressionCodec]]. * Block size can be configured by spark.io.compression.snappy.block.size. */ class SnappyCompressionCodec extends CompressionCodec { override def compressedOutputStream(s: OutputStream): OutputStream = { val blockSize = System.getProperty("spark.io.compression.snappy.block.size", "32768").toInt new SnappyOutputStream(s, blockSize) } override def compressedInputStream(s: InputStream): InputStream = new SnappyInputStream(s) }
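
// Hypothetical round-trip sketch (not part of Spark): push bytes through whichever codec
// the spark.io.compression.codec property selects and read them back. It assumes the
// snappy-java / compress-lzf jars are on the classpath, as they are for this module.
object CompressionCodecExample {
  def main(args: Array[String]): Unit = {
    val codec = CompressionCodec.createCodec()
    val buffer = new java.io.ByteArrayOutputStream()
    val out = codec.compressedOutputStream(buffer)
    out.write("block data".getBytes("UTF-8"))
    out.close()
    val in = codec.compressedInputStream(new java.io.ByteArrayInputStream(buffer.toByteArray))
    val roundTripped = scala.io.Source.fromInputStream(in, "UTF-8").mkString
    assert(roundTripped == "block data")
  }
}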
bavardage/spark
core/src/main/scala/spark/io/CompressionCodec.scala
Scala
apache-2.0
2,767
package com.shekhargulati.sentiment_analyzer import org.scalatest.{FunSpec, Matchers} class SentimentAnalyzerSpec extends FunSpec with Matchers { describe("sentiment analyzer") { it("should return POSITIVE when input has positive emotion") { val input = "Scala is a great general purpose language." val sentiment = SentimentAnalyzer.mainSentiment(input) sentiment should be(Sentiment.POSITIVE) } it("should return NEGATIVE when input has negative emotion") { val input = "Dhoni laments bowling, fielding errors in series loss" val sentiment = SentimentAnalyzer.mainSentiment(input) sentiment should be(Sentiment.NEGATIVE) } it("should return NEUTRAL when input has no emotion") { val input = "I am reading a book" val sentiment = SentimentAnalyzer.mainSentiment(input) sentiment should be(Sentiment.NEUTRAL) } } }
shekhargulati/52-technologies-in-2016
03-stanford-corenlp/sentiment-analyzer/src/test/scala/com/shekhargulati/sentiment_analyzer/SentimentAnalyzerSpec.scala
Scala
mit
902
package bootstrap.liftweb import net.liftweb._ import util._ import common._ import http._ import sitemap._ import Loc._ import mapper.{DB, StandardDBVendor, Schemifier} import code.model._ import net.liftmodules.{fobo, fobojqres} import scravatar.{Gravatar, DefaultImage} /** * A class that's instantiated early and run. It allows the application * to modify lift's environment */ class Boot { def boot { if (!DB.jndiJdbcConnAvailable_?) { val vendor = new StandardDBVendor( Props.get("db.driver") openOr "org.h2.Driver", Props.get("db.url") openOr "jdbc:h2:lift_proto.db;AUTO_SERVER=TRUE", Props.get("db.user"), Props.get("db.password") ) LiftRules.unloadHooks.append(vendor.closeAllConnections_! _) DB.defineConnectionManager(DefaultConnectionIdentifier, vendor) } // Use Lift's Mapper ORM to populate the database // you don't need to use Mapper to use Lift... use // any ORM you want Schemifier.schemify(true, Schemifier.infoF _, User) // where to search snippet LiftRules.addToPackages("code") def sitemapMutators = User.sitemapMutator //The SiteMap is built in the Site object bellow LiftRules.setSiteMapFunc(() => sitemapMutators(Site.sitemap)) //Init the FoBo - Front-End Toolkit module, //see http://liftweb.net/lift_modules for more info // Demonstrating the use of Resource and API initiation instead of using Toolkit (that includes both resource and api). fobo.Resource.init = fobo.Resource.JQuery224 //fobo.Resource.init=fobo.Resource.JQueryMigrate141 //trying out the migrate resource //fobojqres.Resource.init=fobojqres.Resource.JQueryMigrate141 //the same as above but directly from jq's resource module. fobo.Resource.init = fobo.Resource.FontAwesome463 fobo.Resource.init = fobo.Resource.AJMaterial114 //fobo.Toolkit.init=fobo.Toolkit.AngularJS153 // same as using resource and api below fobo.Resource.init = fobo.Resource.AngularJS153 //rem if using AngularJS toolkit above fobo.API.init = fobo.API.Angular1 //rem if using AngularJS toolkit above //Show the spinny image when an Ajax call starts LiftRules.ajaxStart = Full( () => LiftRules.jsArtifacts.show("ajax-loader").cmd) // Make the spinny image go away when it ends LiftRules.ajaxEnd = Full( () => LiftRules.jsArtifacts.hide("ajax-loader").cmd) // Force the request to be UTF-8 LiftRules.early.append(_.setCharacterEncoding("UTF-8")) // What is the function to test if a user is logged in? LiftRules.loggedInTest = Full(() => User.loggedIn_?) // Use HTML5 for rendering LiftRules.htmlProperties.default.set((r: Req) => new Html5Properties(r.userAgent)) LiftRules.securityRules = () => { SecurityRules( content = Some( ContentSecurityPolicy( scriptSources = List(ContentSourceRestriction.UnsafeEval, ContentSourceRestriction.UnsafeInline, ContentSourceRestriction.Self), styleSources = List(ContentSourceRestriction.UnsafeInline, ContentSourceRestriction.Self), imageSources = List(ContentSourceRestriction.All) ))) } // Make a transaction span the whole HTTP request S.addAround(DB.buildLoanWrapper) } object Site { import scala.xml._ //if user is logged in replace menu label "User" with users gravatar image and full name. def userDDLabel: NodeSeq = { def gravatar: NodeSeq = { val gurl = Gravatar( User.currentUser .map(u => u.email.get) .openOrThrowException("Something wicked happened #1")) .size(36) .avatarUrl <img class="gravatar" src={gurl}/> } lazy val username = User.currentUser.map(u => u.firstName + " " + u.lastName) User.loggedIn_? 
match { case true => <xml:group>{gravatar} {username.openOrThrowException("Something wicked happened")}</xml:group> case _ => <xml:group>{S ? "UserDDLabel"}</xml:group> } } //val ddLabel1 = Menu(userDDLabel) / "ddlabel1" val divider1 = Menu("divider1") / "divider1" val home = Menu.i("Home") / "index" val userMenu = User.AddUserMenusHere val static = Menu( Loc("Static", Link(List("static"), true, "/static/index"), S.loc("StaticContent", scala.xml.Text("Static Content")), LocGroup("lg2", "topRight"))) val AMDesign = Menu( Loc( "AMDesign", ExtLink("https://material.angularjs.org/"), S.loc("AMDesign", scala.xml.Text("Angular Material")), LocGroup("lg2") /*, fobo.TBLocInfo.LinkTargetBlank */ )) val FLTDemo = Menu( Loc( "FLTDemo", ExtLink("http://www.media4u101.se/fobo-lift-template-demo/"), S.loc("FLTDemo", scala.xml.Text("FoBo Lift Template Demo")), LocGroup("lg2") /*, fobo.TBLocInfo.LinkTargetBlank */ )) def sitemap = SiteMap( home >> LocGroup("lg1"), static, AMDesign, FLTDemo // ddLabel1 // ddLabel1 >> LocGroup("topRight") >> PlaceHolder submenus ( // divider1 /*>> fobo.TBLocInfo.Divider*/ >> userMenu // ) ) } }
karma4u101/FoBo-Demo
pimping-lift-advanced-md/src/main/scala/bootstrap/liftweb/Boot.scala
Scala
apache-2.0
5,402
package org.sgine.ui.render /** * TextureCoordinates is a convenience class for generating common scenarios of texture coordinates. * * @author Matt Hicks <[email protected]> */ object TextureCoordinates { /** * Creates texture coordinates for the section specified of a texture based on the dimensions of * the texture supplied. */ def rectCoords(x: Double, y: Double, width: Double, height: Double, textureWidth: Double, textureHeight: Double) = { val left = x / textureWidth val right = (x + width) / textureWidth val top = y / textureHeight val bottom = (y + height) / textureHeight List( left, top, right, top, left, bottom, right, bottom, left, bottom, right, top ) } def slice(x1: Double, y1: Double, x2: Double, y2: Double, width: Double, height: Double) = { rectCoords(x1, y1, x2 - x1, y2 - y1, width, height) } /** * Generic generation of texture coordinates presuming use of the entire texture. */ def rect(flipHorizontal: Boolean = false, flipVertical: Boolean = false) = { val left = if (flipHorizontal) { 1.0 } else { 0.0 } val right = if (flipHorizontal) { 0.0 } else { 1.0 } val top = if (flipVertical) { 1.0 } else { 0.0 } val bottom = if (flipVertical) { 0.0 } else { 1.0 } List( left, top, right, top, left, bottom, right, bottom, left, bottom, right, top ) } /** * Generic generation of texture coordinates presuming use of entire texture. */ def box() = { rect() ::: // Front rect(true) ::: // Back rect() ::: // Left rect(true) ::: // Right rect() ::: // Top rect(false, true) // Bottom } def scale9(x1: Double, y1: Double, x2: Double, y2: Double, width: Double, height: Double) = { slice(0.0, 0.0, x1, y1, width, height) ::: // Top-Left slice(x1, 0.0, x2, y1, width, height) ::: // Top slice(x2, 0.0, width, y1, width, height) ::: // Top-Right slice(0.0, y1, x1, y2, width, height) ::: // Left slice(x1, y1, x2, y2, width, height) ::: // Center slice(x2, y1, width, y2, width, height) ::: // Right slice(0.0, y2, x1, height, width, height) ::: // Bottom-Left slice(x1, y2, x2, height, width, height) ::: // Bottom slice(x2, y2, width, height, width, height) // Bottom-Right } }
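
// Hypothetical usage sketch: UV coordinates for a 64x64 sprite at (128, 0) in a 256x256
// atlas. The left edge is 128/256 = 0.5 and the right edge is (128 + 64)/256 = 0.75, so
// rectCoords interleaves those values with top = 0.0 and bottom = 0.25 per vertex.
object TextureCoordinatesExample {
  def main(args: Array[String]): Unit = {
    val uv = TextureCoordinates.rectCoords(128.0, 0.0, 64.0, 64.0, 256.0, 256.0)
    println(uv) // List(0.5, 0.0, 0.75, 0.0, 0.5, 0.25, 0.75, 0.25, 0.5, 0.25, 0.75, 0.0)
  }
}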
Axiometry/sgine
ui/src/main/scala/org/sgine/ui/render/TextureCoordinates.scala
Scala
bsd-3-clause
2,534
/* * Copyright (c) 2016 JLCM * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ package de.beikern.quilltests.schemas import de.beikern.quilltests.daos.Dao.{Bar, Foo} import io.getquill.context.Context trait Schema { this: Context[_, _] => val mappedFoo = quote { querySchema[Foo]( "foo_table", _.field1 -> "f1", _.field2 -> "f2" ) } val mappedBar = quote { querySchema[Bar]( "bar_table", _.field1 -> "wololo", _.field2 -> "oyoyoy" ) } }
beikern/quilltests
src/main/scala/de/beikern/quilltests/schemas/Schema.scala
Scala
mit
1,543
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.analysis import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.expressions.Attribute import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression import org.apache.spark.sql.catalyst.plans._ import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.catalyst.streaming.InternalOutputModes import org.apache.spark.sql.streaming.OutputMode /** * Analyzes the presence of unsupported operations in a logical plan. */ object UnsupportedOperationChecker { def checkForBatch(plan: LogicalPlan): Unit = { plan.foreachUp { case p if p.isStreaming => throwError("Queries with streaming sources must be executed with writeStream.start()")(p) case _ => } } def checkForStreaming(plan: LogicalPlan, outputMode: OutputMode): Unit = { if (!plan.isStreaming) { throwError( "Queries without streaming sources cannot be executed with writeStream.start()")(plan) } /** Collect all the streaming aggregates in a sub plan */ def collectStreamingAggregates(subplan: LogicalPlan): Seq[Aggregate] = { subplan.collect { case a: Aggregate if a.isStreaming => a } } val mapGroupsWithStates = plan.collect { case f: FlatMapGroupsWithState if f.isStreaming && f.isMapGroupsWithState => f } // Disallow multiple `mapGroupsWithState`s. if (mapGroupsWithStates.size >= 2) { throwError( "Multiple mapGroupsWithStates are not supported on a streaming DataFrames/Datasets")(plan) } val flatMapGroupsWithStates = plan.collect { case f: FlatMapGroupsWithState if f.isStreaming && !f.isMapGroupsWithState => f } // Disallow mixing `mapGroupsWithState`s and `flatMapGroupsWithState`s if (mapGroupsWithStates.nonEmpty && flatMapGroupsWithStates.nonEmpty) { throwError( "Mixing mapGroupsWithStates and flatMapGroupsWithStates are not supported on a " + "streaming DataFrames/Datasets")(plan) } // Only allow multiple `FlatMapGroupsWithState(Append)`s in append mode. if (flatMapGroupsWithStates.size >= 2 && ( outputMode != InternalOutputModes.Append || flatMapGroupsWithStates.exists(_.outputMode != InternalOutputModes.Append) )) { throwError( "Multiple flatMapGroupsWithStates are not supported when they are not all in append mode" + " or the output mode is not append on a streaming DataFrames/Datasets")(plan) } // Disallow multiple streaming aggregations val aggregates = collectStreamingAggregates(plan) if (aggregates.size > 1) { throwError( "Multiple streaming aggregations are not supported with " + "streaming DataFrames/Datasets")(plan) } // Disallow some output mode outputMode match { case InternalOutputModes.Append if aggregates.nonEmpty => val aggregate = aggregates.head // Find any attributes that are associated with an eventTime watermark. 
val watermarkAttributes = aggregate.groupingExpressions.collect { case a: Attribute if a.metadata.contains(EventTimeWatermark.delayKey) => a } // We can append rows to the sink once the group is under the watermark. Without this // watermark a group is never "finished" so we would never output anything. if (watermarkAttributes.isEmpty) { throwError( s"$outputMode output mode not supported when there are streaming aggregations on " + s"streaming DataFrames/DataSets without watermark")(plan) } case InternalOutputModes.Complete if aggregates.isEmpty => throwError( s"$outputMode output mode not supported when there are no streaming aggregations on " + s"streaming DataFrames/Datasets")(plan) case _ => } /** * Whether the subplan will contain complete data or incremental data in every incremental * execution. Some operations may be allowed only when the child logical plan gives complete * data. */ def containsCompleteData(subplan: LogicalPlan): Boolean = { val aggs = subplan.collect { case a@Aggregate(_, _, _) if a.isStreaming => a } // Either the subplan has no streaming source, or it has aggregation with Complete mode !subplan.isStreaming || (aggs.nonEmpty && outputMode == InternalOutputModes.Complete) } plan.foreachUp { implicit subPlan => // Operations that cannot exists anywhere in a streaming plan subPlan match { case Aggregate(_, aggregateExpressions, child) => val distinctAggExprs = aggregateExpressions.flatMap { expr => expr.collect { case ae: AggregateExpression if ae.isDistinct => ae } } throwErrorIf( child.isStreaming && distinctAggExprs.nonEmpty, "Distinct aggregations are not supported on streaming DataFrames/Datasets. Consider " + "using approx_count_distinct() instead.") case _: Command => throwError("Commands like CreateTable*, AlterTable*, Show* are not supported with " + "streaming DataFrames/Datasets") // mapGroupsWithState and flatMapGroupsWithState case m: FlatMapGroupsWithState if m.isStreaming => // Check compatibility with output modes and aggregations in query val aggsAfterFlatMapGroups = collectStreamingAggregates(plan) if (m.isMapGroupsWithState) { // check mapGroupsWithState // allowed only in update query output mode and without aggregation if (aggsAfterFlatMapGroups.nonEmpty) { throwError( "mapGroupsWithState is not supported with aggregation " + "on a streaming DataFrame/Dataset") } else if (outputMode != InternalOutputModes.Update) { throwError( "mapGroupsWithState is not supported with " + s"$outputMode output mode on a streaming DataFrame/Dataset") } } else { // check latMapGroupsWithState if (aggsAfterFlatMapGroups.isEmpty) { // flatMapGroupsWithState without aggregation: operation's output mode must // match query output mode m.outputMode match { case InternalOutputModes.Update if outputMode != InternalOutputModes.Update => throwError( "flatMapGroupsWithState in update mode is not supported with " + s"$outputMode output mode on a streaming DataFrame/Dataset") case InternalOutputModes.Append if outputMode != InternalOutputModes.Append => throwError( "flatMapGroupsWithState in append mode is not supported with " + s"$outputMode output mode on a streaming DataFrame/Dataset") case _ => } } else { // flatMapGroupsWithState with aggregation: update operation mode not allowed, and // *groupsWithState after aggregation not allowed if (m.outputMode == InternalOutputModes.Update) { throwError( "flatMapGroupsWithState in update mode is not supported with " + "aggregation on a streaming DataFrame/Dataset") } else if (collectStreamingAggregates(m).nonEmpty) { throwError( 
"flatMapGroupsWithState in append mode is not supported after " + s"aggregation on a streaming DataFrame/Dataset") } } } // Check compatibility with timeout configs if (m.timeout == EventTimeTimeout) { // With event time timeout, watermark must be defined. val watermarkAttributes = m.child.output.collect { case a: Attribute if a.metadata.contains(EventTimeWatermark.delayKey) => a } if (watermarkAttributes.isEmpty) { throwError( "Watermark must be specified in the query using " + "'[Dataset/DataFrame].withWatermark()' for using event-time timeout in a " + "[map|flatMap]GroupsWithState. Event-time timeout not supported without " + "watermark.")(plan) } } case d: Deduplicate if collectStreamingAggregates(d).nonEmpty => throwError("dropDuplicates is not supported after aggregation on a " + "streaming DataFrame/Dataset") case Join(left, right, joinType, _) => joinType match { case _: InnerLike => if (left.isStreaming && right.isStreaming) { throwError("Inner join between two streaming DataFrames/Datasets is not supported") } case FullOuter => if (left.isStreaming || right.isStreaming) { throwError("Full outer joins with streaming DataFrames/Datasets are not supported") } case LeftOuter | LeftSemi | LeftAnti => if (right.isStreaming) { throwError("Left outer/semi/anti joins with a streaming DataFrame/Dataset " + "on the right is not supported") } case RightOuter => if (left.isStreaming) { throwError("Right outer join with a streaming DataFrame/Dataset on the left is " + "not supported") } case NaturalJoin(_) | UsingJoin(_, _) => // They should not appear in an analyzed plan. case _ => throwError(s"Join type $joinType is not supported with streaming DataFrame/Dataset") } case c: CoGroup if c.children.exists(_.isStreaming) => throwError("CoGrouping with a streaming DataFrame/Dataset is not supported") case u: Union if u.children.map(_.isStreaming).distinct.size == 2 => throwError("Union between streaming and batch DataFrames/Datasets is not supported") case Except(left, right) if right.isStreaming => throwError("Except on a streaming DataFrame/Dataset on the right is not supported") case Intersect(left, right) if left.isStreaming && right.isStreaming => throwError("Intersect between two streaming DataFrames/Datasets is not supported") case GroupingSets(_, _, child, _) if child.isStreaming => throwError("GroupingSets is not supported on streaming DataFrames/Datasets") case GlobalLimit(_, _) | LocalLimit(_, _) if subPlan.children.forall(_.isStreaming) => throwError("Limits are not supported on streaming DataFrames/Datasets") case Sort(_, _, _) if !containsCompleteData(subPlan) => throwError("Sorting is not supported on streaming DataFrames/Datasets, unless it is on " + "aggregated DataFrame/Dataset in Complete output mode") case Sample(_, _, _, _, child) if child.isStreaming => throwError("Sampling is not supported on streaming DataFrames/Datasets") case Window(_, _, _, child) if child.isStreaming => throwError("Non-time-based windows are not supported on streaming DataFrames/Datasets") case ReturnAnswer(child) if child.isStreaming => throwError("Cannot return immediate result on streaming DataFrames/Dataset. 
Queries " + "with streaming DataFrames/Datasets must be executed with writeStream.start().") case _ => } } } private def throwErrorIf( condition: Boolean, msg: String)(implicit operator: LogicalPlan): Unit = { if (condition) { throwError(msg) } } private def throwError(msg: String)(implicit operator: LogicalPlan): Nothing = { throw new AnalysisException( msg, operator.origin.line, operator.origin.startPosition, Some(operator)) } }
wangyixiaohuihui/spark2-annotation
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/UnsupportedOperationChecker.scala
Scala
apache-2.0
13,190
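The checker in the row above rejects, among other things, a streaming aggregation run in Append mode with no event-time watermark. The following is a minimal sketch of the kind of query that passes that check; it is not taken from the repository above, and the rate source, column names and console sink are illustrative assumptions.

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.window

object WatermarkedAppendSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[2]").appName("sketch").getOrCreate()
    import spark.implicits._
    // Built-in "rate" source emits (timestamp, value) rows; used here only for illustration.
    val rates = spark.readStream.format("rate").option("rowsPerSecond", "1").load()
    val counts = rates
      .withWatermark("timestamp", "10 seconds") // without this, Append mode fails the check above
      .groupBy(window($"timestamp", "1 minute"))
      .count()
    counts.writeStream.outputMode("append").format("console").start().awaitTermination()
  }
}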
import java.net._

import scala.collection.JavaConverters._
import scala.io.Source

case class Response(
  status: Int,
  headers: Map[String, String],
  body: String
)

object Request {

  def apply(
    method: String,
    url: String,
    headers: Map[String, String],
    body: Option[String]
  ): Response = {

    val c = new URL(url)
      .openConnection()
      .asInstanceOf[HttpURLConnection]

    c.setInstanceFollowRedirects(false)
    c.setRequestMethod(method)
    c.setDoInput(true)
    c.setDoOutput(body.isDefined)

    headers foreach { case (k, v) => c.setRequestProperty(k, v) }

    body foreach { b => c.getOutputStream.write(b.getBytes("UTF-8")) }

    val response = Response(
      status = c.getResponseCode(),
      headers = c
        .getHeaderFields()
        .asScala
        .filter({ case (k, _) => k != null })
        .map({ case (k, v) => (k, v.asScala.mkString(",")) })
        .toMap - "Date" - "Content-Length" - "Server",
      body = Source.fromInputStream {
        if (c.getResponseCode() < 400) {
          c.getInputStream
        } else {
          c.getErrorStream
        }
      }.mkString
    )

    c.disconnect()

    response
  }
}
earldouglas/xsbt-web-plugin
src/sbt-test/tomcat-plugin/default/src/test/scala/Http.scala
Scala
bsd-3-clause
1,265
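A short usage sketch for the Request helper above; the URL is a placeholder and the object name is illustrative.

object HttpExample {
  def main(args: Array[String]): Unit = {
    // Simple GET with no extra headers and no request body.
    val res = Request("GET", "http://localhost:8080/", Map.empty, None)
    println(s"status=${res.status} bodyLength=${res.body.length}")
  }
}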
/*
 * Copyright (C) Lightbend Inc. <https://www.lightbend.com>
 */

package com.lightbend.lagom.scaladsl.persistence

import akka.Done
import akka.event.Logging
import akka.stream.scaladsl.Flow

import scala.concurrent.Future

import akka.NotUsed
import akka.persistence.query.NoOffset
import akka.persistence.query.Offset

object ReadSideProcessor {

  /**
   * An read side offset processor.
   *
   * This is responsible for the actual read side handling, including handling offsets and the events themselves.
   */
  abstract class ReadSideHandler[Event <: AggregateEvent[Event]] {

    /**
     * Prepare the database for all processors.
     *
     * This will be invoked at system startup. It is guaranteed to only be invoked once at a time across the entire
     * cluster, and so is safe to be used to perform actions like creating tables, that could cause problems if
     * done from multiple nodes.
     *
     * It will be invoked again if it fails, and it may be invoked multiple times as nodes of the cluster go up or
     * down. Unless the entire system is restarted, there is no way to guarantee that it will be invoked at a
     * particular time - in particular, it should not be used for doing upgrades unless the entire system is
     * restarted and a new cluster built from scratch.
     *
     * @return A `Future` that is redeemed when preparation is finished.
     */
    def globalPrepare(): Future[Done] = Future.successful(Done)

    /**
     * Prepare this processor.
     *
     * The primary purpose of this method is to load the last offset that was processed, so that read side
     * processing can continue from that offset.
     *
     * This also provides an opportunity for processors to do any initialisation activities, such as creating or
     * updating database tables, or migrating data.
     *
     * This will be invoked at least once for each tag, and may be invoked multiple times, such as in the event of
     * failure.
     *
     * @param tag The tag to get the offset for.
     * @return A `Future` that is redeemed when preparation is finished.
     */
    def prepare(tag: AggregateEventTag[Event]): Future[Offset] = Future.successful(NoOffset);

    /**
     * Flow to handle the events.
     *
     * If the handler does any blocking, this flow should be configured to use a dispatcher that is configured to
     * allow for that blocking.
     */
    def handle(): Flow[EventStreamElement[Event], Done, NotUsed]
  }
}

/**
 * A read side processor.
 *
 * Read side processors consume events produced by [[com.lightbend.lagom.scaladsl.persistence.PersistentEntity]]
 * instances, and update some read side data store that is optimized for queries.
 *
 * The events they consume must be tagged, and a read side is able to consume events of one or more tags. Events are
 * usually tagged according to some supertype of event, for example, events may be tagged as <code>Order</code> events.
 * They may also be tagged according to a hash of the ID of the entity associated with the event - this allows read
 * side event handling to be sharded across many nodes. Tagging is done using
 * [[com.lightbend.lagom.scaladsl.persistence.AggregateEventTag]].
 *
 * Read side processors are responsible for tracking what events they have already seen. This is done using offsets,
 * which are sequential values associated with each event. Note that end users typically will not need to handle
 * offsets themselves, this will be provided by Lagom support specific to the read side datastore, and end users can
 * just focus on handling the events themselves.
 */
abstract class ReadSideProcessor[Event <: AggregateEvent[Event]] {

  /**
   * Return a [[ReadSideProcessor#ReadSideHandler]] for the given offset type.
   *
   * @return The offset processor.
   */
  def buildHandler(): ReadSideProcessor.ReadSideHandler[Event]

  /**
   * The tags to aggregate.
   *
   * This must return at least one tag to aggregate. Read side processors will be sharded over the cluster by these
   * tags, so if events are tagged by a shard key, the read side processing load can be distributed across the
   * cluster.
   *
   * @return The tags to aggregate.
   */
  def aggregateTags: Set[AggregateEventTag[Event]]

  /**
   * The name of this read side.
   *
   * This name should be unique among the read sides and entity types of the service. By default it is using the
   * short class name of the concrete `ReadSideProcessor` class. Subclasses may override to define other type names.
   * It is wise to override and retain the original name when the class name is changed because this name is used to
   * identify read sides throughout the cluster.
   */
  def readSideName: String = Logging.simpleName(getClass)
}
lagom/lagom
persistence/scaladsl/src/main/scala/com/lightbend/lagom/scaladsl/persistence/ReadSideProcessor.scala
Scala
apache-2.0
4,775
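A minimal sketch of a concrete processor built only from the API shown in the row above. The class name, the println body, and the choice to inject the tag set through the constructor (so no Lagom tag-construction API has to be assumed) are all illustrative assumptions.

import akka.{ Done, NotUsed }
import akka.stream.scaladsl.Flow
import com.lightbend.lagom.scaladsl.persistence._

// Hypothetical: logs every event element it sees and completes with Done.
class LoggingProcessor[Event <: AggregateEvent[Event]](tags: Set[AggregateEventTag[Event]])
    extends ReadSideProcessor[Event] {

  override def buildHandler(): ReadSideProcessor.ReadSideHandler[Event] =
    new ReadSideProcessor.ReadSideHandler[Event] {
      // prepare() keeps its default NoOffset here, so the stream replays from the start.
      override def handle(): Flow[EventStreamElement[Event], Done, NotUsed] =
        Flow[EventStreamElement[Event]].map { element =>
          println(s"read side saw: $element")
          Done
        }
    }

  override def aggregateTags: Set[AggregateEventTag[Event]] = tags
}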
package scala.idx.Genesys

import java.io.File
import java.util

import com.beust.jcommander.{JCommander,Parameter}

import collection.JavaConversions._

object Args {

  // Declared as var because JCommander assigns a new collection declared
  // as java.util.List because that's what JCommander will replace it with.
  // It'd be nice if JCommander would just use the provided List so this
  // could be a val and a Scala LinkedList.
  @Parameter(
    names = Array("-f", "--file"),
    description = "Json Configuration File.")
  var configFile: String="configuration.json"

  @Parameter(names=Array("-h","--help"),description="Show help.",help=true)
  private var help:Boolean=_

  @Parameter(names=Array("-v","--version"),description = "Show Version",help=true)
  var showVersion:Boolean=_

  @Parameter(
    names=Array("-e","--entities"),
    description="Entities to process separated by comma."
  )
  var entities:java.util.List[String]=new util.ArrayList[String]()

  @Parameter(names =Array("-st","--stemplates"),description = "Show the templates included in the config file." )
  var showTemplates:Boolean=_

  @Parameter(names=Array("-tf","--templates-files"),description = "Templates files to process, if not templates files are specified all templates files will be generated.")
  var templatesFiles=new util.ArrayList[String]()

  @Parameter(names=Array("-gc","--gconfig"),description="Generate Configuration Files and folders.")
  var generateConfig:Boolean=_
}
serdna27/Genesys
src/main/scala/scala/idx/Genesys/Args.scala
Scala
mit
1,558
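A sketch of how an annotated singleton like Args is typically handed to JCommander; the main object, the sample flags in the comment, and the assumption that Args is in scope are illustrative.

import com.beust.jcommander.JCommander
import scala.idx.Genesys.Args

object ArgsExample {
  def main(argv: Array[String]): Unit = {
    // Parses e.g. Array("-f", "my-config.json", "-e", "User,Order") into the Args fields.
    val jc = new JCommander(Args)
    jc.parse(argv: _*)
    println(s"configFile=${Args.configFile} entities=${Args.entities}")
  }
}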
/*
 * Copyright 2015 Nicolas Rinaudo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kantan.csv
package scalaz

import _root_.scalaz.Maybe
import kantan.csv.scalaz.arbitrary._
import laws.discipline._

class MaybeCodecTests extends DisciplineSuite {
  import org.scalacheck.Arbitrary
  import kantan.csv.laws._

  // These 2 implicits are not found in 2.13. I'm not sure why - it *might* have to do with the change in import
  // statements behaviour?
  implicit val ai: Arbitrary[IllegalRow[Maybe[(Int, Int)]]] = arbIllegalValueFromDec
  implicit val al: Arbitrary[LegalRow[Maybe[(Int, Int)]]] = arbLegalValueFromEnc

  checkAll("Maybe[Int]", CellCodecTests[Maybe[Int]].codec[String, Float])
  checkAll("Maybe[(Int, Int)]", RowCodecTests[Maybe[(Int, Int)]].codec[String, Float])
}
nrinaudo/scala-csv
scalaz/shared/src/test/scala/kantan/csv/scalaz/MaybeCodecTests.scala
Scala
mit
1,308
package free.validation import cats.arrow.{NaturalTransformation => ~>} import cats.data.{Coproduct, Const, Kleisli} import cats.free.Inject import cats.syntax.cartesian._ import free.validation.Algebra.{DefaultMarks, JsonLikeAlgebra} import free.validation.Dsl.JsonLikeDsl import play.api.libs.json._ import scala.Function.unlift object Doc { case class DocMark[A](documentation: String) class DocDsl[M[_]](implicit I: Inject[DocMark, M]) { def doc[A](documentation: String): M[A] = I.inj(DocMark(documentation)) } object DocDsl { implicit def injectDocDsl[N[_]](implicit I: Inject[DocMark, N]): DocDsl[N] = new DocDsl[N] } val nt = new ~>[DocMark, Kleisli[Option, ?, String]] { def apply[A](mark: DocMark[A]): Kleisli[Option, A, String] = Kleisli(_ => None) } } object CustomUsage { def run(): Unit = { import Doc._ type MyMarks[A] = Coproduct[DocMark, DefaultMarks, A] type AL[T] = JsonLikeAlgebra[MyMarks, T] val coreDsl: JsonLikeDsl[MyMarks] = implicitly; import coreDsl._ val docDsl: DocDsl[MyMarks] = implicitly; import docDsl._ case class Pet(name: String, weight: Int) case class Person(name: String, age: Int, pet: Option[Pet]) implicit val petConfig: FreeIM[AL, Pet] = ( (__ \\ "name").as[String]() |@| (__ \\ "weight").as[Int]() ).imap(Pet.apply)(unlift(Pet.unapply)) val personConfig: FreeIM[AL, Person] = ( (__ \\ "name").as[String](nonEmpty, doc("here is my name")) |@| (__ \\ "age").as[Int](doc("that's the age")) |@| (__ \\ "pet").as[Option[Pet]]() ).imap(Person.apply)(unlift(Person.unapply)) val codec: Codec[Person, JsObject] = personConfig.foldMap[Codec[?, JsObject]](Compile2JsCodec.compile[MyMarks](Doc.nt or Compile2JsCodec.defaultMarks)) val me = Person("Olivier", 25, Some(Pet("sansan", 10))) val me2 = Person("Olivier", 25, None) val me3 = Person("", 25, Some(Pet("sansan", 10))) val json: JsObject = codec.writes(me) val json2: JsObject = codec.writes(me2) val json3: JsObject = codec.writes(me3) val validated = codec.validate(json) val validated2 = codec.validate(json2) val validated3 = codec.validate(json3) assert(json.toString == """{"name":"Olivier","age":25,"pet":{"name":"sansan","weight":10}}""") assert(json2.toString == """{"name":"Olivier","age":25,"pet":null}""") assert(json3.toString == """{"name":"","age":25,"pet":{"name":"sansan","weight":10}}""") assert(validated.toString == """Success(Person(Olivier,25,Some(Pet(sansan,10))))""") assert(validated2.toString == """Success(Person(Olivier,25,None))""") assert(validated3.toString == """Failure(List((/name,ArrayBuffer(ValidationError(List(empty string),WrappedArray())))))""") val docNT: DocMark ~> Const[Option[String], ?] = new ~>[DocMark, Const[Option[String], ?]] { def apply[A](m: DocMark[A]): Const[Option[String], A] = Const(Some(m.documentation)) } def myMarksCompiler[A] = Compile2JsonSchema.compile[MyMarks, A](docNT or Compile2JsonSchema.defaultMarks) _ val jsonSchema: JsObject = myMarksCompiler(personConfig) assert(Json.prettyPrint(jsonSchema) == """{ | "schema" : { | "type" : "object", | "properties" : { | "name" : { | "type" : "string", | "description" : "here is my name" | }, | "age" : { | "type" : "number", | "description" : "that's the age" | }, | "pet" : { | "type" : "object", | "properties" : { | "name" : { | "type" : "string" | }, | "weight" : { | "type" : "number" | } | } | } | } | } |}""".stripMargin ) } }
OlivierBlanvillain/free-validation
src/main/scala/CustomUsage.scala
Scala
mit
3,974
package com.avsystem.scex.compiler.annotation

import scala.annotation.StaticAnnotation

/**
 * Created: 04-11-2014
 * Author: ghik
 */
class Input extends StaticAnnotation
AVSystem/scex
scex-core/src/main/scala/com/avsystem/scex/compiler/annotation/Input.scala
Scala
mit
175
package works.weave.socks.aws.orders.domain.repository

import java.net.URI

import works.weave.socks.aws.orders.domain.repository.CartRepository.OrderItem

trait CartRepository {
  def findItemsByURI(items : URI) : List[OrderItem]
}

object CartRepository {
  case class OrderItem(id : String, itemId : String, quantity : Integer, unitPrice : Number)
}
Compositional/orders-aws
src/main/scala/works.weave.socks.aws.orders/domain/repository/CartRepository.scala
Scala
apache-2.0
364
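A minimal in-memory implementation sketch of the trait above; the class name and the immutable backing map are assumptions, intended only to show the shape of an implementation for tests.

import java.net.URI
import works.weave.socks.aws.orders.domain.repository.CartRepository
import works.weave.socks.aws.orders.domain.repository.CartRepository.OrderItem

// Hypothetical test stand-in: items are held in a Map keyed by cart URI.
class InMemoryCartRepository(store: Map[URI, List[OrderItem]]) extends CartRepository {
  override def findItemsByURI(items: URI): List[OrderItem] =
    store.getOrElse(items, Nil)
}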
/*
 * Copyright 2016 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package uk.gov.hmrc.ct.computations.calculations

import uk.gov.hmrc.ct.box.CtTypeConverters
import uk.gov.hmrc.ct.computations._

trait AdjustedTradingProfitOrLossCalculator extends CtTypeConverters {

  def calculateAdjustedTradingProfit(cp44: CP44,
                                     cp54: CP54,
                                     cp59: CP59,
                                     cp186: CP186,
                                     cp91: CP91,
                                     cp670: CP670,
                                     cp668: CP668): CP117 = {
    CP117(profit(cp44, cp54, cp59, cp186, cp91, cp670, cp668) max 0)
  }

  def calculateAdjustedTradingLoss(cp44: CP44,
                                   cp54: CP54,
                                   cp59: CP59,
                                   cp186: CP186,
                                   cp91: CP91,
                                   cp670: CP670,
                                   cp668: CP668): CP118 = {
    CP118((profit(cp44, cp54, cp59, cp186, cp91, cp670, cp668) min 0).abs)
  }

  private def profit(cp44: CP44,
                     cp54: CP54,
                     cp59: CP59,
                     cp186: CP186,
                     cp91: CP91,
                     cp670: CP670,
                     cp668: CP668): Int =
    cp44 + cp54 - cp59 - cp186 + cp91 + cp670 - cp668
}

trait AdjustedTradingProfitForPeriodCalculator extends CtTypeConverters {

  def adjustedTradingProfitForPeriodCalculation(cp117: CP117, cpq17: CPQ17): CP282 = {
    val result = cpq17.value match {
      case Some(true) => Some(cp117.value)
      case _ => None
    }
    CP282(result)
  }
}

trait AdjustedTradingProfitCalculator {

  def adjustedTradingProfitCalculation(cp117: CP117): CP289 = {
    CP289(if (cp117.value < 0) None else Some(cp117.value))
  }

  def adjustedTradingProfitCalculationNonOptional(cp117: CP117): CP256 = {
    val value = cp117.value
    CP256(if (value < 0) 0 else value)
  }
}
ahudspith-equalexperts/ct-calculations
src/main/scala/uk/gov/hmrc/ct/computations/calculations/AdjustedTradingProfitOrLossCalculator.scala
Scala
apache-2.0
2,503
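Illustrative arithmetic for the profit formula above (the figures are made up): with cp44 = 1000, cp54 = 50, cp59 = 200, cp186 = 100, cp91 = 25, cp670 = 0 and cp668 = 75, the adjusted figure is 1000 + 50 - 200 - 100 + 25 + 0 - 75 = 700, so CP117 carries 700 and CP118 carries 0; had the net figure come out at -700 instead, CP117 would be 0 and CP118 would be 700.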
import stainless.lang._

object InnerClassLocalFun {

  abstract class Test {
    def something: BigInt
  }

  def foo(x: Boolean, l: BigInt): Test = {
    def bar(y: Boolean, m: BigInt): Test = {
      case class Bar() extends Test {
        def something = if (x && y) l else m
      }
      Bar()
    }
    bar(true, 3)
  }

  def test = (foo(true, 42).something == 42).holds
}
epfl-lara/stainless
frontends/benchmarks/extraction/valid/InnerClassLocalFun.scala
Scala
apache-2.0
381
package at.ait.dme.forcelayout

/**
 * A node in the force layout simulation. The node has an immutable component, representing the actual
 * graph node, and a mutable 'state' field, containing the force simulation state.
 * @author Rainer Simon <[email protected]>
 */
case class Node private[forcelayout] (id: String, label: String, mass: Double, group: Int, inlinks: Seq[Edge], outlinks: Seq[Edge], state: NodeState) {

  def this(id: String, label: String, mass: Double = 1.0, group: Int = 0) =
    this(id, label, mass, group, Seq.empty[Edge], Seq.empty[Edge], NodeState())

  lazy val links = inlinks ++ outlinks
}

object Node {

  // Shortcut, so the auxiliary constructor works in the normal case-class way
  def apply(id: String, label: String, mass: Double = 1.0, group: Int = 0) = new Node(id, label, mass, group)
}

/**
 * A container for the (mutable) force simulation state of a graph node.
 * @author Rainer Simon <[email protected]>
 */
case class NodeState(var pos: Vector2D = Vector2D.random(1.0), var velocity: Vector2D = Vector2D(0, 0), var force: Vector2D = Vector2D(0, 0))
rsimon/scala-force-layout
src/main/scala/at/ait/dme/forcelayout/Node.scala
Scala
mit
1,119
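A short usage sketch for the companion apply above; the identifiers and printed fields are arbitrary.

import at.ait.dme.forcelayout.Node

object NodeExample {
  def main(args: Array[String]): Unit = {
    // Companion apply delegates to the auxiliary constructor: empty link lists, fresh NodeState.
    val node = Node("n1", "Node One", mass = 2.0, group = 1)
    println(node.links.size) // 0 until edges are attached
    println(node.state.pos)  // random initial position from NodeState()
  }
}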
package org.monarchinitiative.dosdp

import org.apache.jena.sys.JenaSystem
import org.monarchinitiative.dosdp.cli.Config
import zio._
import zio.test.Assertion._
import zio.test._
import zio.logging._

object QueryGeneratorRegexTest extends DefaultRunnableSpec {

  JenaSystem.init()

  def spec = suite("Test query label token regex") {
    testM("Query label token regex") {
      for {
        dosdp   <- Config.inputDOSDPFrom("src/test/resources/org/monarchinitiative/dosdp/QueryGeneratorRegexTest.yaml")
        results <- SPARQL.triplesFor(ExpandedDOSDP(dosdp, OBOPrefixes), Config.AnnotationAxioms)
      } yield assert(results.forall(a => a.contains("vector-borne")))(isFalse) &&
        assert(results.forall(a => !a.contains("infectious_disease123asdf")))(isFalse)
    }
  }.provideCustomLayer(Logging.consoleErr())
}
INCATools/dosdp-tools
src/test/scala/org/monarchinitiative/dosdp/QueryGeneratorRegexTest.scala
Scala
mit
819
/******************************************************************************* Copyright (c) 2012-2014, S-Core, KAIST. All rights reserved. Use is subject to license terms. This distribution may include materials developed by third parties. ***************************************************************************** */ package kr.ac.kaist.jsaf.analysis.typing.domain import kr.ac.kaist.jsaf.analysis.typing.Config import kr.ac.kaist.jsaf.analysis.typing.models.DOMHtml.HTMLTopElement import kr.ac.kaist.jsaf.analysis.cfg.{CFG, InternalError} import kr.ac.kaist.jsaf.analysis.typing.AddressManager._ object DomainPrinter { def printHeap(ind: Int, heap: Heap, cfg: CFG, verbose_lv: Int): String = { val printer = new DomainPrinter(verbose_lv) printer.indent(ind) printer.ppHeap(ind, heap, cfg) printer.toString } def printHeap(ind: Int, heap: Heap, cfg: CFG): String = { val printer = new DomainPrinter(Config.verbose) printer.indent(ind) printer.ppHeap(ind, heap, cfg) printer.toString } def printContext(ind: Int, ctx: Context): String = { val printer = new DomainPrinter(Config.verbose) printer.indent(ind) printer.ppContext(ind, ctx) printer.toString } def printValue(value: Value): String = { val printer = new DomainPrinter(Config.verbose) printer.ppValue(value) printer.toString } def printLoc(l: Loc): String = { val printer = new DomainPrinter(Config.verbose) printer.ppLoc(l) printer.toString } def printLocSet(lset: LocSet): String = { val printer = new DomainPrinter(Config.verbose) printer.ppLocSet(lset) printer.toString } def printObj(ind: Int, o: Obj): String = { val printer = new DomainPrinter(Config.verbose) printer.indent(ind) printer.ppObj(ind, o, Config.verbose >= 2) printer.toString } } private class DomainPrinter(verbose_lv: Int) { val sb = new StringBuilder() def indent(n: Int): Unit = { for (i <- 0 to n-1) sb.append(" ") } def newline(ind: Int, first: Boolean): Boolean = { if (!first) { sb.append("\\n") indent(ind) } false } def ppHeap(ind: Int, heap: Heap, cfg: CFG): Unit = { var first = true for ((loc, obj) <- heap.map.toSeq.sortBy(_._1)) { // for non-verbose mode, locations for built-in are skipped. 
// verbose level1 : user location & html location // verbose level2 : user location & predefined location // verbose level3 : print all if ( verbose_lv == 3 || (locToAddr(loc) >= locToAddr(CollapsedLoc) && !cfg.isHtmlAddr(locToAddr(loc))) //|| (verbose_lv == 1 && cfg.isHtmlAddr(locToAddr(loc))) || (verbose_lv == 1 && (HTMLTopElement.getInsLoc(heap).contains(loc) || cfg.isHtmlAddr(locToAddr(loc)))) || (verbose_lv == 2 && locToAddr(loc) < locToAddr(CollapsedLoc))) { /*} if (verbose_lv == || locToAddr(loc) >= locToAddr(CollapsedLoc)) { */ first = newline(ind, first) val len = ppLoc(loc) sb.append(" -> ") if (verbose_lv >= 2 || locToAddr(loc) != locToAddr(GlobalLoc)) { ppObj(ind+len+4, obj, true) } else { ppObj(ind+len+4, obj, false) } } } if (first) sb.append("Bot") } def ppContext(ind: Int, ctx: Context): Unit = { sb.append("{") // ppLocSet(ctx._1); sb.append("} X {") // ppLocSet(ctx._2) if (Config.verbose >= 2) { sb.append("} X {") ppAddrSet(ctx._3) sb.append("} X ") if (ctx._4 == null) { sb.append("Top") } else { sb.append("{") ppAddrSet(ctx._4) sb.append("}") } } else { sb.append("}") } } def ppObj(ind: Int, obj: Obj, verbose: Boolean): Unit = { var first = true val map = obj.getAllProps for ((prop) <- map.toSeq.sortBy(f => f)) { val pv = obj(prop) val abs = obj.domIn(prop) val show = verbose match { case true => true case false => Config.testMode match { case true => !Config.globalVerboseProp(prop) && !Config.testModeProp.contains(prop) case false => !Config.globalVerboseProp(prop) } } if (show) { first = newline(ind, first) val len = ppProp(prop) val arrow = if (!(BoolFalse <= abs) || prop.take(1) == "@") " -> " else " @-> " sb.append(arrow) ppPropValue(ind+len+arrow.length, pv) } } if (first) sb.append("{ }") } def ppPropValue(ind: Int, pv: PropValue): Unit = { var first = true val ov = pv._1 if (ov != ObjectValueBot) { first = newline(ind, first) (ov._2, ov._3, ov._4) match { case (BoolBot, BoolBot, BoolBot) => sb.append("[VAL] ") case _ => sb.append("[") ppBool(ov._2) ppBool(ov._3) ppBool(ov._4) sb.append("] ") } ppValue(ov._1) } val fun = pv._3 if (fun != FunSetBot) { first = newline(ind, first) sb.append("[FUN] ") ppFunSet(fun) } if (first) sb.append("Bot") } def ppValue(v: Value): Unit = { var first = true if (v._1 != PValueBot) { sb.append(v._1.toString) first = false } if (v._2 != LocSetBot) { if (!first) sb.append(", ") ppLocSet(v._2) first = false } if (first) sb.append("Bot") } def ppProp(prop: String): Int = { val str = prop sb.append(str) str.length } def ppLoc(loc: Loc): Int = { val name = locName(loc) val str = isRecentLoc(loc) match { case true => "#" + name case false => "##" + name } sb.append(str) str.length } def ppAddrSet(set: AddrSet): Unit = { var first = true for (addr <- set.toSeq.sorted) { if (first) { first = false } else { sb.append(", ") } ppLoc(addrToLoc(addr, Recent)) } } def ppLocSet(set: LocSet): Unit = { var first = true for (loc <- set.toSeq.sorted) { if (first) { first = false } else { sb.append(", ") } ppLoc(loc) } } def ppFunSet(set: FunSet): Unit = { var first = true sb.append("{") for (fid <- set.toSeq.sorted) { if (first) { first = false } else { sb.append(", ") } sb.append(fid.toString) } sb.append("}") } def ppBool(b: AbsBool): Unit = { val str = b.getPair match { case (AbsBot, _) => "B" case (AbsSingle, Some(b)) => if (b) "t" else "f" case (AbsTop, _) => "T" case _ => throw new InternalError("AbsBool does not have an abstract value for multiple values.") } sb.append(str) } override def toString = sb.toString }
darkrsw/safe
src/main/scala/kr/ac/kaist/jsaf/analysis/typing/domain/DomainPrinter.scala
Scala
bsd-3-clause
7,158
trait T { override def clone(): String = "hi" }
trait U extends T
class C1 extends U with Cloneable {
  def f1 = (this: T).clone()
  def f2 = (this: U).clone()
  def f3 = (this: C1).clone()
}
class C2 {
  def f1(t: T) = t.clone()
  def f2(t: U) = t.clone()
  def f3(t: C1) = t.clone()
}

object Test {
  def main(arg: Array[String]): Unit = {
    val r = new StringBuffer()
    val c1 = new C1
    r.append(c1.f1)
    r.append(c1.f2)
    r.append(c1.f3)
    val t = new T { }
    val u = new U { }
    val c2 = new C2
    r.append(c2.f1(t))
    r.append(c2.f1(u))
    r.append(c2.f1(c1))
    r.append(c2.f2(u))
    r.append(c2.f2(c1))
    r.append(c2.f3(c1))
    r.toString
  }
}
som-snytt/dotty
tests/run/invocationReceivers2.scala
Scala
apache-2.0
684
package org.apache.predictionio.workflow

import org.apache.predictionio.controller._

import org.scalatest.FunSuite
import org.scalatest.Matchers._

class EvaluationWorkflowSuite extends FunSuite with SharedSparkContext {

  test("Evaluation return best engine params, simple result type: Double") {
    val engine = new Engine1()
    val ep0 = EngineParams(dataSourceParams = Engine1.DSP(0.2))
    val ep1 = EngineParams(dataSourceParams = Engine1.DSP(0.3))
    val ep2 = EngineParams(dataSourceParams = Engine1.DSP(0.3))
    val ep3 = EngineParams(dataSourceParams = Engine1.DSP(-0.2))
    val engineParamsList = Seq(ep0, ep1, ep2, ep3)

    val evaluator = MetricEvaluator(new Metric0())

    object Eval extends Evaluation {
      engineEvaluator = (new Engine1(), MetricEvaluator(new Metric0()))
    }

    val result = EvaluationWorkflow.runEvaluation(
      sc, Eval, engine, engineParamsList, evaluator, WorkflowParams())

    result.bestScore.score shouldBe 0.3
    result.bestEngineParams shouldBe ep1
  }

  test("Evaluation return best engine params, complex result type") {
    val engine = new Engine1()
    val ep0 = EngineParams(dataSourceParams = Engine1.DSP(0.2))
    val ep1 = EngineParams(dataSourceParams = Engine1.DSP(0.3))
    val ep2 = EngineParams(dataSourceParams = Engine1.DSP(0.3))
    val ep3 = EngineParams(dataSourceParams = Engine1.DSP(-0.2))
    val engineParamsList = Seq(ep0, ep1, ep2, ep3)

    val evaluator = MetricEvaluator(new Metric1())

    object Eval extends Evaluation {
      engineEvaluator = (new Engine1(), MetricEvaluator(new Metric1()))
    }

    val result = EvaluationWorkflow.runEvaluation(
      sc, Eval, engine, engineParamsList, evaluator, WorkflowParams())

    result.bestScore.score shouldBe Metric1.Result(0, 0.3)
    result.bestEngineParams shouldBe ep1
  }
}
alex9311/PredictionIO
core/src/test/scala/org/apache/predictionio/workflow/EvaluationWorkflowTest.scala
Scala
apache-2.0
1,893
/*
 * Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
 */
package play.api.db.evolutions

import java.io.File

import org.specs2.mutable.Specification
import play.api.{ Environment, Mode }

class EvolutionsReaderSpec extends Specification {

  "EnvironmentEvolutionsReader" should {

    "read evolution files from classpath" in {
      val environment = Environment(new File("."), getClass.getClassLoader, Mode.Test)
      val reader = new EnvironmentEvolutionsReader(environment)

      reader.evolutions("test") must_== Seq(
        Evolution(1, "create table test (id bigint not null, name varchar(255));", "drop table if exists test;"),
        Evolution(2, "insert into test (id, name) values (1, 'alice');\ninsert into test (id, name) values (2, 'bob');", "delete from test;"),
        Evolution(3, "insert into test (id, name) values (3, 'charlie');\ninsert into test (id, name) values (4, 'dave');", ""),
        Evolution(4, "", "")
      )
    }
  }
}
wsargent/playframework
framework/src/play-jdbc-evolutions/src/test/scala/play/api/db/evolutions/EvolutionsReaderSpec.scala
Scala
apache-2.0
982
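The spec above reads evolution scripts from the test classpath; a sketch of what the first script would conventionally look like for those expectations (the resource path evolutions/test/1.sql and the exact content are assumptions inferred from Evolution(1, ...) above, not copied from the repository):

# --- !Ups
create table test (id bigint not null, name varchar(255));

# --- !Downs
drop table if exists test;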
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql import scala.collection.mutable.HashSet import scala.concurrent.duration._ import scala.language.postfixOps import org.apache.spark.CleanerListener import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.expressions.SubqueryExpression import org.apache.spark.sql.execution.{RDDScanExec, SparkPlan} import org.apache.spark.sql.execution.columnar._ import org.apache.spark.sql.execution.exchange.ShuffleExchangeExec import org.apache.spark.sql.functions._ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.{SharedSQLContext, SQLTestUtils} import org.apache.spark.storage.{RDDBlockId, StorageLevel} import org.apache.spark.util.{AccumulatorContext, Utils} private case class BigData(s: String) class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext { import testImplicits._ setupTestData() override def afterEach(): Unit = { try { spark.catalog.clearCache() } finally { super.afterEach() } } def rddIdOf(tableName: String): Int = { val plan = spark.table(tableName).queryExecution.sparkPlan plan.collect { case InMemoryTableScanExec(_, _, relation) => relation.cachedColumnBuffers.id case _ => fail(s"Table $tableName is not cached\n" + plan) }.head } def isMaterialized(rddId: Int): Boolean = { val maybeBlock = sparkContext.env.blockManager.get(RDDBlockId(rddId, 0)) maybeBlock.foreach(_ => sparkContext.env.blockManager.releaseLock(RDDBlockId(rddId, 0))) maybeBlock.nonEmpty } private def getNumInMemoryRelations(ds: Dataset[_]): Int = { val plan = ds.queryExecution.withCachedData var sum = plan.collect { case _: InMemoryRelation => 1 }.sum plan.transformAllExpressions { case e: SubqueryExpression => sum += getNumInMemoryRelations(e.plan) e } sum } private def getNumInMemoryTablesRecursively(plan: SparkPlan): Int = { plan.collect { case InMemoryTableScanExec(_, _, relation) => getNumInMemoryTablesRecursively(relation.child) + 1 }.sum } test("withColumn doesn't invalidate cached dataframe") { var evalCount = 0 val myUDF = udf((x: String) => { evalCount += 1; "result" }) val df = Seq(("test", 1)).toDF("s", "i").select(myUDF($"s")) df.cache() df.collect() assert(evalCount === 1) df.collect() assert(evalCount === 1) val df2 = df.withColumn("newColumn", lit(1)) df2.collect() // We should not reevaluate the cached dataframe assert(evalCount === 1) } test("cache temp table") { withTempView("tempTable") { testData.select('key).createOrReplaceTempView("tempTable") assertCached(sql("SELECT COUNT(*) FROM tempTable"), 0) spark.catalog.cacheTable("tempTable") assertCached(sql("SELECT COUNT(*) FROM tempTable")) spark.catalog.uncacheTable("tempTable") } } test("unpersist an uncached table will not raise exception") { assert(None == 
spark.sharedState.cacheManager.lookupCachedData(testData)) testData.unpersist(blocking = true) assert(None == spark.sharedState.cacheManager.lookupCachedData(testData)) testData.unpersist(blocking = false) assert(None == spark.sharedState.cacheManager.lookupCachedData(testData)) testData.persist() assert(None != spark.sharedState.cacheManager.lookupCachedData(testData)) testData.unpersist(blocking = true) assert(None == spark.sharedState.cacheManager.lookupCachedData(testData)) testData.unpersist(blocking = false) assert(None == spark.sharedState.cacheManager.lookupCachedData(testData)) } test("cache table as select") { withTempView("tempTable") { sql("CACHE TABLE tempTable AS SELECT key FROM testData") assertCached(sql("SELECT COUNT(*) FROM tempTable")) spark.catalog.uncacheTable("tempTable") } } test("uncaching temp table") { testData.select('key).createOrReplaceTempView("tempTable1") testData.select('key).createOrReplaceTempView("tempTable2") spark.catalog.cacheTable("tempTable1") assertCached(sql("SELECT COUNT(*) FROM tempTable1")) assertCached(sql("SELECT COUNT(*) FROM tempTable2")) // Is this valid? spark.catalog.uncacheTable("tempTable2") // Should this be cached? assertCached(sql("SELECT COUNT(*) FROM tempTable1"), 0) } test("too big for memory") { val data = "*" * 1000 sparkContext.parallelize(1 to 200000, 1).map(_ => BigData(data)).toDF() .createOrReplaceTempView("bigData") spark.table("bigData").persist(StorageLevel.MEMORY_AND_DISK) assert(spark.table("bigData").count() === 200000L) spark.table("bigData").unpersist(blocking = true) } test("calling .cache() should use in-memory columnar caching") { spark.table("testData").cache() assertCached(spark.table("testData")) spark.table("testData").unpersist(blocking = true) } test("calling .unpersist() should drop in-memory columnar cache") { spark.table("testData").cache() spark.table("testData").count() spark.table("testData").unpersist(blocking = true) assertCached(spark.table("testData"), 0) } test("isCached") { spark.catalog.cacheTable("testData") assertCached(spark.table("testData")) assert(spark.table("testData").queryExecution.withCachedData match { case _: InMemoryRelation => true case _ => false }) spark.catalog.uncacheTable("testData") assert(!spark.catalog.isCached("testData")) assert(spark.table("testData").queryExecution.withCachedData match { case _: InMemoryRelation => false case _ => true }) } test("SPARK-1669: cacheTable should be idempotent") { assume(!spark.table("testData").logicalPlan.isInstanceOf[InMemoryRelation]) spark.catalog.cacheTable("testData") assertCached(spark.table("testData")) assertResult(1, "InMemoryRelation not found, testData should have been cached") { getNumInMemoryRelations(spark.table("testData")) } spark.catalog.cacheTable("testData") assertResult(0, "Double InMemoryRelations found, cacheTable() is not idempotent") { spark.table("testData").queryExecution.withCachedData.collect { case r @ InMemoryRelation(_, _, _, _, _: InMemoryTableScanExec, _) => r }.size } spark.catalog.uncacheTable("testData") } test("read from cached table and uncache") { spark.catalog.cacheTable("testData") checkAnswer(spark.table("testData"), testData.collect().toSeq) assertCached(spark.table("testData")) spark.catalog.uncacheTable("testData") checkAnswer(spark.table("testData"), testData.collect().toSeq) assertCached(spark.table("testData"), 0) } test("SELECT star from cached table") { sql("SELECT * FROM testData").createOrReplaceTempView("selectStar") spark.catalog.cacheTable("selectStar") checkAnswer( sql("SELECT * 
FROM selectStar WHERE key = 1"), Seq(Row(1, "1"))) spark.catalog.uncacheTable("selectStar") } test("Self-join cached") { val unCachedAnswer = sql("SELECT * FROM testData a JOIN testData b ON a.key = b.key").collect() spark.catalog.cacheTable("testData") checkAnswer( sql("SELECT * FROM testData a JOIN testData b ON a.key = b.key"), unCachedAnswer.toSeq) spark.catalog.uncacheTable("testData") } test("'CACHE TABLE' and 'UNCACHE TABLE' SQL statement") { sql("CACHE TABLE testData") assertCached(spark.table("testData")) val rddId = rddIdOf("testData") assert( isMaterialized(rddId), "Eagerly cached in-memory table should have already been materialized") sql("UNCACHE TABLE testData") assert(!spark.catalog.isCached("testData"), "Table 'testData' should not be cached") eventually(timeout(10 seconds)) { assert(!isMaterialized(rddId), "Uncached in-memory table should have been unpersisted") } } test("CACHE TABLE tableName AS SELECT * FROM anotherTable") { withTempView("testCacheTable") { sql("CACHE TABLE testCacheTable AS SELECT * FROM testData") assertCached(spark.table("testCacheTable")) val rddId = rddIdOf("testCacheTable") assert( isMaterialized(rddId), "Eagerly cached in-memory table should have already been materialized") spark.catalog.uncacheTable("testCacheTable") eventually(timeout(10 seconds)) { assert(!isMaterialized(rddId), "Uncached in-memory table should have been unpersisted") } } } test("CACHE TABLE tableName AS SELECT ...") { withTempView("testCacheTable") { sql("CACHE TABLE testCacheTable AS SELECT key FROM testData LIMIT 10") assertCached(spark.table("testCacheTable")) val rddId = rddIdOf("testCacheTable") assert( isMaterialized(rddId), "Eagerly cached in-memory table should have already been materialized") spark.catalog.uncacheTable("testCacheTable") eventually(timeout(10 seconds)) { assert(!isMaterialized(rddId), "Uncached in-memory table should have been unpersisted") } } } test("CACHE LAZY TABLE tableName") { sql("CACHE LAZY TABLE testData") assertCached(spark.table("testData")) val rddId = rddIdOf("testData") assert( !isMaterialized(rddId), "Lazily cached in-memory table shouldn't be materialized eagerly") sql("SELECT COUNT(*) FROM testData").collect() assert( isMaterialized(rddId), "Lazily cached in-memory table should have been materialized") spark.catalog.uncacheTable("testData") eventually(timeout(10 seconds)) { assert(!isMaterialized(rddId), "Uncached in-memory table should have been unpersisted") } } test("InMemoryRelation statistics") { sql("CACHE TABLE testData") spark.table("testData").queryExecution.withCachedData.collect { case cached: InMemoryRelation => val actualSizeInBytes = (1 to 100).map(i => 4 + i.toString.length + 4).sum assert(cached.stats.sizeInBytes === actualSizeInBytes) } } test("Drops temporary table") { testData.select('key).createOrReplaceTempView("t1") spark.table("t1") spark.catalog.dropTempView("t1") intercept[AnalysisException](spark.table("t1")) } test("Drops cached temporary table") { testData.select('key).createOrReplaceTempView("t1") testData.select('key).createOrReplaceTempView("t2") spark.catalog.cacheTable("t1") assert(spark.catalog.isCached("t1")) assert(spark.catalog.isCached("t2")) spark.catalog.dropTempView("t1") intercept[AnalysisException](spark.table("t1")) assert(!spark.catalog.isCached("t2")) } test("Clear all cache") { sql("SELECT key FROM testData LIMIT 10").createOrReplaceTempView("t1") sql("SELECT key FROM testData LIMIT 5").createOrReplaceTempView("t2") spark.catalog.cacheTable("t1") spark.catalog.cacheTable("t2") 
spark.catalog.clearCache() assert(spark.sharedState.cacheManager.isEmpty) sql("SELECT key FROM testData LIMIT 10").createOrReplaceTempView("t1") sql("SELECT key FROM testData LIMIT 5").createOrReplaceTempView("t2") spark.catalog.cacheTable("t1") spark.catalog.cacheTable("t2") sql("Clear CACHE") assert(spark.sharedState.cacheManager.isEmpty) } test("Ensure accumulators to be cleared after GC when uncacheTable") { sql("SELECT key FROM testData LIMIT 10").createOrReplaceTempView("t1") sql("SELECT key FROM testData LIMIT 5").createOrReplaceTempView("t2") spark.catalog.cacheTable("t1") spark.catalog.cacheTable("t2") sql("SELECT * FROM t1").count() sql("SELECT * FROM t2").count() sql("SELECT * FROM t1").count() sql("SELECT * FROM t2").count() val toBeCleanedAccIds = new HashSet[Long] val accId1 = spark.table("t1").queryExecution.withCachedData.collect { case i: InMemoryRelation => i.sizeInBytesStats.id }.head toBeCleanedAccIds += accId1 val accId2 = spark.table("t1").queryExecution.withCachedData.collect { case i: InMemoryRelation => i.sizeInBytesStats.id }.head toBeCleanedAccIds += accId2 val cleanerListener = new CleanerListener { def rddCleaned(rddId: Int): Unit = {} def shuffleCleaned(shuffleId: Int): Unit = {} def broadcastCleaned(broadcastId: Long): Unit = {} def accumCleaned(accId: Long): Unit = { toBeCleanedAccIds.synchronized { toBeCleanedAccIds -= accId } } def checkpointCleaned(rddId: Long): Unit = {} } spark.sparkContext.cleaner.get.attachListener(cleanerListener) spark.catalog.uncacheTable("t1") spark.catalog.uncacheTable("t2") System.gc() eventually(timeout(10 seconds)) { assert(toBeCleanedAccIds.synchronized { toBeCleanedAccIds.isEmpty }, "batchStats accumulators should be cleared after GC when uncacheTable") } assert(AccumulatorContext.get(accId1).isEmpty) assert(AccumulatorContext.get(accId2).isEmpty) } test("SPARK-10327 Cache Table is not working while subquery has alias in its project list") { sparkContext.parallelize((1, 1) :: (2, 2) :: Nil) .toDF("key", "value").selectExpr("key", "value", "key+1").createOrReplaceTempView("abc") spark.catalog.cacheTable("abc") val sparkPlan = sql( """select a.key, b.key, c.key from |abc a join abc b on a.key=b.key |join abc c on a.key=c.key""".stripMargin).queryExecution.sparkPlan assert(sparkPlan.collect { case e: InMemoryTableScanExec => e }.size === 3) assert(sparkPlan.collect { case e: RDDScanExec => e }.size === 0) } /** * Verifies that the plan for `df` contains `expected` number of Exchange operators. */ private def verifyNumExchanges(df: DataFrame, expected: Int): Unit = { assert( df.queryExecution.executedPlan.collect { case e: ShuffleExchangeExec => e }.size == expected) } test("A cached table preserves the partitioning and ordering of its cached SparkPlan") { val table3x = testData.union(testData).union(testData) table3x.createOrReplaceTempView("testData3x") sql("SELECT key, value FROM testData3x ORDER BY key").createOrReplaceTempView("orderedTable") spark.catalog.cacheTable("orderedTable") assertCached(spark.table("orderedTable")) // Should not have an exchange as the query is already sorted on the group by key. verifyNumExchanges(sql("SELECT key, count(*) FROM orderedTable GROUP BY key"), 0) checkAnswer( sql("SELECT key, count(*) FROM orderedTable GROUP BY key ORDER BY key"), sql("SELECT key, count(*) FROM testData3x GROUP BY key ORDER BY key").collect()) spark.catalog.uncacheTable("orderedTable") spark.catalog.dropTempView("orderedTable") // Set up two tables distributed in the same way. 
Try this with the data distributed into // different number of partitions. for (numPartitions <- 1 until 10 by 4) { withTempView("t1", "t2") { testData.repartition(numPartitions, $"key").createOrReplaceTempView("t1") testData2.repartition(numPartitions, $"a").createOrReplaceTempView("t2") spark.catalog.cacheTable("t1") spark.catalog.cacheTable("t2") // Joining them should result in no exchanges. verifyNumExchanges(sql("SELECT * FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a"), 0) checkAnswer(sql("SELECT * FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a"), sql("SELECT * FROM testData t1 JOIN testData2 t2 ON t1.key = t2.a")) // Grouping on the partition key should result in no exchanges verifyNumExchanges(sql("SELECT count(*) FROM t1 GROUP BY key"), 0) checkAnswer(sql("SELECT count(*) FROM t1 GROUP BY key"), sql("SELECT count(*) FROM testData GROUP BY key")) spark.catalog.uncacheTable("t1") spark.catalog.uncacheTable("t2") } } // Distribute the tables into non-matching number of partitions. Need to shuffle one side. withTempView("t1", "t2") { testData.repartition(6, $"key").createOrReplaceTempView("t1") testData2.repartition(3, $"a").createOrReplaceTempView("t2") spark.catalog.cacheTable("t1") spark.catalog.cacheTable("t2") val query = sql("SELECT key, value, a, b FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a") verifyNumExchanges(query, 1) assert(query.queryExecution.executedPlan.outputPartitioning.numPartitions === 6) checkAnswer( query, testData.join(testData2, $"key" === $"a").select($"key", $"value", $"a", $"b")) spark.catalog.uncacheTable("t1") spark.catalog.uncacheTable("t2") } // One side of join is not partitioned in the desired way. Need to shuffle one side. withTempView("t1", "t2") { testData.repartition(6, $"value").createOrReplaceTempView("t1") testData2.repartition(6, $"a").createOrReplaceTempView("t2") spark.catalog.cacheTable("t1") spark.catalog.cacheTable("t2") val query = sql("SELECT key, value, a, b FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a") verifyNumExchanges(query, 1) assert(query.queryExecution.executedPlan.outputPartitioning.numPartitions === 6) checkAnswer( query, testData.join(testData2, $"key" === $"a").select($"key", $"value", $"a", $"b")) spark.catalog.uncacheTable("t1") spark.catalog.uncacheTable("t2") } withTempView("t1", "t2") { testData.repartition(6, $"value").createOrReplaceTempView("t1") testData2.repartition(12, $"a").createOrReplaceTempView("t2") spark.catalog.cacheTable("t1") spark.catalog.cacheTable("t2") val query = sql("SELECT key, value, a, b FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a") verifyNumExchanges(query, 1) assert(query.queryExecution.executedPlan.outputPartitioning.numPartitions === 12) checkAnswer( query, testData.join(testData2, $"key" === $"a").select($"key", $"value", $"a", $"b")) spark.catalog.uncacheTable("t1") spark.catalog.uncacheTable("t2") } // One side of join is not partitioned in the desired way. Since the number of partitions of // the side that has already partitioned is smaller than the side that is not partitioned, // we shuffle both side. 
withTempView("t1", "t2") { testData.repartition(6, $"value").createOrReplaceTempView("t1") testData2.repartition(3, $"a").createOrReplaceTempView("t2") spark.catalog.cacheTable("t1") spark.catalog.cacheTable("t2") val query = sql("SELECT key, value, a, b FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a") verifyNumExchanges(query, 2) checkAnswer( query, testData.join(testData2, $"key" === $"a").select($"key", $"value", $"a", $"b")) spark.catalog.uncacheTable("t1") spark.catalog.uncacheTable("t2") } // repartition's column ordering is different from group by column ordering. // But they use the same set of columns. withTempView("t1") { testData.repartition(6, $"value", $"key").createOrReplaceTempView("t1") spark.catalog.cacheTable("t1") val query = sql("SELECT value, key from t1 group by key, value") verifyNumExchanges(query, 0) checkAnswer( query, testData.distinct().select($"value", $"key")) spark.catalog.uncacheTable("t1") } // repartition's column ordering is different from join condition's column ordering. // We will still shuffle because hashcodes of a row depend on the column ordering. // If we do not shuffle, we may actually partition two tables in totally two different way. // See PartitioningSuite for more details. withTempView("t1", "t2") { val df1 = testData df1.repartition(6, $"value", $"key").createOrReplaceTempView("t1") val df2 = testData2.select($"a", $"b".cast("string")) df2.repartition(6, $"a", $"b").createOrReplaceTempView("t2") spark.catalog.cacheTable("t1") spark.catalog.cacheTable("t2") val query = sql("SELECT key, value, a, b FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a and t1.value = t2.b") verifyNumExchanges(query, 1) assert(query.queryExecution.executedPlan.outputPartitioning.numPartitions === 6) checkAnswer( query, df1.join(df2, $"key" === $"a" && $"value" === $"b").select($"key", $"value", $"a", $"b")) spark.catalog.uncacheTable("t1") spark.catalog.uncacheTable("t2") } } test("SPARK-15870 DataFrame can't execute after uncacheTable") { val selectStar = sql("SELECT * FROM testData WHERE key = 1") selectStar.createOrReplaceTempView("selectStar") spark.catalog.cacheTable("selectStar") checkAnswer( selectStar, Seq(Row(1, "1"))) spark.catalog.uncacheTable("selectStar") checkAnswer( selectStar, Seq(Row(1, "1"))) } test("SPARK-15915 Logical plans should use canonicalized plan when override sameResult") { val localRelation = Seq(1, 2, 3).toDF() localRelation.createOrReplaceTempView("localRelation") spark.catalog.cacheTable("localRelation") assert(getNumInMemoryRelations(localRelation) == 1) } test("SPARK-19093 Caching in side subquery") { withTempView("t1") { Seq(1).toDF("c1").createOrReplaceTempView("t1") spark.catalog.cacheTable("t1") val ds = sql( """ |SELECT * FROM t1 |WHERE |NOT EXISTS (SELECT * FROM t1) """.stripMargin) assert(getNumInMemoryRelations(ds) == 2) } } test("SPARK-19093 scalar and nested predicate query") { withTempView("t1", "t2", "t3", "t4") { Seq(1).toDF("c1").createOrReplaceTempView("t1") Seq(2).toDF("c1").createOrReplaceTempView("t2") Seq(1).toDF("c1").createOrReplaceTempView("t3") Seq(1).toDF("c1").createOrReplaceTempView("t4") spark.catalog.cacheTable("t1") spark.catalog.cacheTable("t2") spark.catalog.cacheTable("t3") spark.catalog.cacheTable("t4") // Nested predicate subquery val ds = sql( """ |SELECT * FROM t1 |WHERE |c1 IN (SELECT c1 FROM t2 WHERE c1 IN (SELECT c1 FROM t3 WHERE c1 = 1)) """.stripMargin) assert(getNumInMemoryRelations(ds) == 3) // Scalar subquery and predicate subquery val ds2 = sql( """ |SELECT * FROM (SELECT c1, max(c1) FROM t1 GROUP BY c1) 
|WHERE |c1 = (SELECT max(c1) FROM t2 GROUP BY c1) |OR |EXISTS (SELECT c1 FROM t3) |OR |c1 IN (SELECT c1 FROM t4) """.stripMargin) assert(getNumInMemoryRelations(ds2) == 4) } } test("SPARK-19765: UNCACHE TABLE should un-cache all cached plans that refer to this table") { withTable("t") { withTempPath { path => Seq(1 -> "a").toDF("i", "j").write.parquet(path.getCanonicalPath) sql(s"CREATE TABLE t USING parquet LOCATION '${path.toURI}'") spark.catalog.cacheTable("t") spark.table("t").select($"i").cache() checkAnswer(spark.table("t").select($"i"), Row(1)) assertCached(spark.table("t").select($"i")) Utils.deleteRecursively(path) spark.sessionState.catalog.refreshTable(TableIdentifier("t")) spark.catalog.uncacheTable("t") assert(spark.table("t").select($"i").count() == 0) assert(getNumInMemoryRelations(spark.table("t").select($"i")) == 0) } } } test("refreshByPath should refresh all cached plans with the specified path") { withTempDir { dir => val path = dir.getCanonicalPath() spark.range(10).write.mode("overwrite").parquet(path) spark.read.parquet(path).cache() spark.read.parquet(path).filter($"id" > 4).cache() assert(spark.read.parquet(path).filter($"id" > 4).count() == 5) spark.range(20).write.mode("overwrite").parquet(path) spark.catalog.refreshByPath(path) assert(spark.read.parquet(path).count() == 20) assert(spark.read.parquet(path).filter($"id" > 4).count() == 15) } } test("SPARK-19993 simple subquery caching") { withTempView("t1", "t2") { Seq(1).toDF("c1").createOrReplaceTempView("t1") Seq(2).toDF("c1").createOrReplaceTempView("t2") val sql1 = """ |SELECT * FROM t1 |WHERE |NOT EXISTS (SELECT * FROM t2) """.stripMargin sql(sql1).cache() val cachedDs = sql(sql1) assert(getNumInMemoryRelations(cachedDs) == 1) // Additional predicate in the subquery plan should cause a cache miss val cachedMissDs = sql( """ |SELECT * FROM t1 |WHERE |NOT EXISTS (SELECT * FROM t2 where c1 = 0) """.stripMargin) assert(getNumInMemoryRelations(cachedMissDs) == 0) } } test("SPARK-19993 subquery caching with correlated predicates") { withTempView("t1", "t2") { Seq(1).toDF("c1").createOrReplaceTempView("t1") Seq(1).toDF("c1").createOrReplaceTempView("t2") // Simple correlated predicate in subquery val sqlText = """ |SELECT * FROM t1 |WHERE |t1.c1 in (SELECT t2.c1 FROM t2 where t1.c1 = t2.c1) """.stripMargin sql(sqlText).cache() val cachedDs = sql(sqlText) assert(getNumInMemoryRelations(cachedDs) == 1) } } test("SPARK-19993 subquery with cached underlying relation") { withTempView("t1") { Seq(1).toDF("c1").createOrReplaceTempView("t1") spark.catalog.cacheTable("t1") // underlying table t1 is cached as well as the query that refers to it. 
val sqlText = """ |SELECT * FROM t1 |WHERE |NOT EXISTS (SELECT * FROM t1) """.stripMargin val ds = sql(sqlText) assert(getNumInMemoryRelations(ds) == 2) val cachedDs = sql(sqlText).cache() assert(getNumInMemoryTablesRecursively(cachedDs.queryExecution.sparkPlan) == 3) } } test("SPARK-19993 nested subquery caching and scalar + predicate subqueris") { withTempView("t1", "t2", "t3", "t4") { Seq(1).toDF("c1").createOrReplaceTempView("t1") Seq(2).toDF("c1").createOrReplaceTempView("t2") Seq(1).toDF("c1").createOrReplaceTempView("t3") Seq(1).toDF("c1").createOrReplaceTempView("t4") // Nested predicate subquery val sql1 = """ |SELECT * FROM t1 |WHERE |c1 IN (SELECT c1 FROM t2 WHERE c1 IN (SELECT c1 FROM t3 WHERE c1 = 1)) """.stripMargin sql(sql1).cache() val cachedDs = sql(sql1) assert(getNumInMemoryRelations(cachedDs) == 1) // Scalar subquery and predicate subquery val sql2 = """ |SELECT * FROM (SELECT c1, max(c1) FROM t1 GROUP BY c1) |WHERE |c1 = (SELECT max(c1) FROM t2 GROUP BY c1) |OR |EXISTS (SELECT c1 FROM t3) |OR |c1 IN (SELECT c1 FROM t4) """.stripMargin sql(sql2).cache() val cachedDs2 = sql(sql2) assert(getNumInMemoryRelations(cachedDs2) == 1) } } test("SPARK-23312: vectorized cache reader can be disabled") { Seq(true, false).foreach { vectorized => withSQLConf(SQLConf.CACHE_VECTORIZED_READER_ENABLED.key -> vectorized.toString) { val df = spark.range(10).cache() df.queryExecution.executedPlan.foreach { case i: InMemoryTableScanExec => assert(i.supportsBatch == vectorized && i.supportCodegen == vectorized) case _ => } } } } }
brad-kaiser/spark
sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
Scala
apache-2.0
28,450
package sparse abstract class Terminal[Token](token: Token) extends Symbol[Token]
gnieh/sparse
core/src/main/scala/sparse/Terminal.scala
Scala
apache-2.0
83
/* * Copyright (C) 2015 Stratio (http://stratio.com) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.stratio.sparta.serving.core.utils import java.util.UUID import akka.event.slf4j.SLF4JLogging import com.stratio.sparta.serving.core.constants.AppConstant import com.stratio.sparta.serving.core.constants.AppConstant._ import com.stratio.sparta.serving.core.curator.CuratorFactoryHolder import com.stratio.sparta.serving.core.exception.ServingCoreException import com.stratio.sparta.serving.core.helpers.FragmentsHelper import com.stratio.sparta.serving.core.models.policy.fragment.FragmentType._ import com.stratio.sparta.serving.core.models.policy.fragment.{FragmentElementModel, FragmentType} import com.stratio.sparta.serving.core.models.policy.{PolicyElementModel, PolicyModel} import com.stratio.sparta.serving.core.models.{ErrorModel, SpartaSerializer} import org.apache.curator.framework.CuratorFramework import org.json4s.jackson.Serialization._ import scala.collection.JavaConversions import scala.util.Try trait FragmentUtils extends SLF4JLogging with SpartaSerializer { val curatorFramework: CuratorFramework def findAllFragments: List[FragmentElementModel] = { if (CuratorFactoryHolder.existsPath(FragmentsPath)) { val children = curatorFramework.getChildren.forPath(FragmentsPath) JavaConversions.asScalaBuffer(children).toList.flatMap(fragmentType => findFragmentsByType(fragmentType)) } else List.empty[FragmentElementModel] } def findFragmentsByType(fragmentType: String): List[FragmentElementModel] = { val fragmentLocation = fragmentPathType(fragmentType) if (CuratorFactoryHolder.existsPath(fragmentLocation)) { val children = curatorFramework.getChildren.forPath(fragmentLocation) JavaConversions.asScalaBuffer(children).toList.map(id => findFragmentByTypeAndId(fragmentType, id)) } else List.empty[FragmentElementModel] } def findFragmentByTypeAndId(fragmentType: String, id: String): FragmentElementModel = { val fragmentLocation = s"${fragmentPathType(fragmentType)}/$id" if (CuratorFactoryHolder.existsPath(fragmentLocation)) { read[FragmentElementModel](new String(curatorFramework.getData.forPath(fragmentLocation))) } else throw new ServingCoreException(ErrorModel.toString( new ErrorModel(ErrorModel.CodeNotExistsFragmentWithId, s"Fragment type: $fragmentType and id: $id not exists"))) } def findFragmentByTypeAndName(fragmentType: String, name: String): Option[FragmentElementModel] = findFragmentsByType(fragmentType).find(fragment => fragment.name == name) def createFragment(fragment: FragmentElementModel): FragmentElementModel = findFragmentByTypeAndName(fragment.fragmentType, fragment.name.toLowerCase) .getOrElse(createNewFragment(fragment)) def updateFragment(fragment: FragmentElementModel): FragmentElementModel = { val newFragment = fragment.copy(name = fragment.name.toLowerCase) curatorFramework.setData().forPath( s"${fragmentPathType(newFragment.fragmentType)}/${fragment.id.get}", write(newFragment).getBytes) newFragment } def deleteAllFragments(): List[FragmentElementModel] = { val 
fragmentsFound = findAllFragments fragmentsFound.foreach(fragment => { val id = fragment.id.getOrElse { throw new ServingCoreException(ErrorModel.toString( new ErrorModel(ErrorModel.CodeErrorDeletingAllFragments, s"Fragment without id: ${fragment.name}."))) } deleteFragmentByTypeAndId(fragment.fragmentType, id) }) fragmentsFound } def deleteFragmentsByType(fragmentType: String): Unit = { val children = curatorFramework.getChildren.forPath(fragmentPathType(fragmentType)) val fragmentsFound = JavaConversions.asScalaBuffer(children).toList.map(element => read[FragmentElementModel](new String(curatorFramework.getData.forPath( s"${fragmentPathType(fragmentType)}/$element")))) fragmentsFound.foreach(fragment => { val id = fragment.id.getOrElse { throw new ServingCoreException(ErrorModel.toString( new ErrorModel(ErrorModel.CodeNotExistsFragmentWithId, s"Fragment without id: ${fragment.name}."))) } deleteFragmentByTypeAndId(fragmentType, id) }) } def deleteFragmentByTypeAndId(fragmentType: String, id: String): Unit = { val fragmentLocation = s"${fragmentPathType(fragmentType)}/$id" if (CuratorFactoryHolder.existsPath(fragmentLocation)) curatorFramework.delete().forPath(fragmentLocation) } def deleteFragmentByTypeAndName(fragmentType: String, name: String): Unit = { val fragmentFound = findFragmentByTypeAndName(fragmentType, name) if (fragmentFound.isDefined && fragmentFound.get.id.isDefined) { val id = fragmentFound.get.id.get val fragmentLocation = s"${fragmentPathType(fragmentType)}/$id" if (CuratorFactoryHolder.existsPath(fragmentLocation)) curatorFramework.delete().forPath(fragmentLocation) else throw new ServingCoreException(ErrorModel.toString(new ErrorModel( ErrorModel.CodeNotExistsFragmentWithId, s"Fragment type: $fragmentType and id: $id not exists"))) } else { throw new ServingCoreException(ErrorModel.toString(new ErrorModel( ErrorModel.CodeExistsFragmentWithName, s"Fragment without id: $name."))) } } /* PRIVATE METHODS */ private def createNewFragment(fragment: FragmentElementModel): FragmentElementModel = { val newFragment = fragment.copy( id = Option(UUID.randomUUID.toString), name = fragment.name.toLowerCase ) curatorFramework.create().creatingParentsIfNeeded().forPath( s"${fragmentPathType(newFragment.fragmentType)}/${newFragment.id.get}", write(newFragment).getBytes()) newFragment } private def fragmentPathType(fragmentType: String): String = { fragmentType match { case "input" => s"$FragmentsPath/input" case "output" => s"$FragmentsPath/output" case _ => throw new IllegalArgumentException("The fragment type must be input|output") } } /* POLICY METHODS */ def getPolicyWithFragments(policy: PolicyModel): PolicyModel = { val policyWithFragments = parseFragments(fillFragments(policy)) if (policyWithFragments.fragments.isEmpty) { val input = FragmentsHelper.populateFragmentFromPolicy(policy, FragmentType.input) val outputs = FragmentsHelper.populateFragmentFromPolicy(policy, FragmentType.output) policyWithFragments.copy(fragments = input ++ outputs) } else policyWithFragments } private def parseFragments(apConfig: PolicyModel): PolicyModel = { val fragmentInputs = getFragmentFromType(apConfig.fragments, FragmentType.input) val fragmentOutputs = getFragmentFromType(apConfig.fragments, FragmentType.output) apConfig.copy( input = Some(getCurrentInput(fragmentInputs, apConfig.input)), outputs = getCurrentOutputs(fragmentOutputs, apConfig.outputs)) } private def fillFragments(apConfig: PolicyModel): PolicyModel = { val currentFragments = apConfig.fragments.flatMap(fragment => { fragment.id 
match { case Some(id) => Try(findFragmentByTypeAndId(fragment.fragmentType, id)).toOption case None => findFragmentByTypeAndName(fragment.fragmentType, fragment.name) } }) apConfig.copy(fragments = currentFragments) } private def getFragmentFromType(fragments: Seq[FragmentElementModel], fragmentType: `type`) : Seq[FragmentElementModel] = { fragments.flatMap(fragment => if (FragmentType.withName(fragment.fragmentType) == fragmentType) Some(fragment) else None) } private def getCurrentInput(fragmentsInputs: Seq[FragmentElementModel], inputs: Option[PolicyElementModel]): PolicyElementModel = { if (fragmentsInputs.isEmpty && inputs.isEmpty) { throw new IllegalStateException("It is mandatory to define one input in the policy.") } if ((fragmentsInputs.size > 1) || (fragmentsInputs.size == 1 && inputs.isDefined && ((fragmentsInputs.head.name != inputs.get.name) || (fragmentsInputs.head.element.configuration.getOrElse( AppConstant.CustomTypeKey, fragmentsInputs.head.element.`type`) != inputs.get.configuration.getOrElse(AppConstant.CustomTypeKey, inputs.get.`type`))))) { throw new IllegalStateException("Only one input is allowed in the policy.") } if (fragmentsInputs.isEmpty) inputs.get else fragmentsInputs.head.element.copy(name = fragmentsInputs.head.name) } private def getCurrentOutputs(fragmentsOutputs: Seq[FragmentElementModel], outputs: Seq[PolicyElementModel]): Seq[PolicyElementModel] = { val outputsTypesNames = fragmentsOutputs.map(fragment => (fragment.element.configuration.getOrElse(AppConstant.CustomTypeKey, fragment.element.`type`), fragment.name)) val outputsNotIncluded = for { output <- outputs outputType = output.configuration.getOrElse(AppConstant.CustomTypeKey, output.`type`) outputTypeName = (outputType, output.name) } yield if (outputsTypesNames.contains(outputTypeName)) None else Some(output) fragmentsOutputs.map(fragment => fragment.element.copy(name = fragment.name)) ++ outputsNotIncluded.flatten } }
fjsc/sparta
serving-core/src/main/scala/com/stratio/sparta/serving/core/utils/FragmentUtils.scala
Scala
apache-2.0
9,825
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.expressions.codegen import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions._ /** * Interface for generated predicate */ abstract class Predicate { def eval(r: InternalRow): Boolean } /** * Generates bytecode that evaluates a boolean [[Expression]] on a given input [[InternalRow]]. */ object GeneratePredicate extends CodeGenerator[Expression, (InternalRow) => Boolean] { protected def canonicalize(in: Expression): Expression = ExpressionCanonicalizer.execute(in) protected def bind(in: Expression, inputSchema: Seq[Attribute]): Expression = BindReferences.bindReference(in, inputSchema) protected def create(predicate: Expression): ((InternalRow) => Boolean) = { val ctx = newCodeGenContext() val eval = predicate.gen(ctx) val code = s""" public SpecificPredicate generate($exprType[] expr) { return new SpecificPredicate(expr); } class SpecificPredicate extends ${classOf[Predicate].getName} { private final $exprType[] expressions; ${declareMutableStates(ctx)} ${declareAddedFunctions(ctx)} public SpecificPredicate($exprType[] expr) { expressions = expr; ${initMutableStates(ctx)} } @Override public boolean eval(InternalRow ${ctx.INPUT_ROW}) { ${eval.code} return !${eval.isNull} && ${eval.value}; } }""" logDebug(s"Generated predicate '$predicate':\\n${CodeFormatter.format(code)}") val p = compile(code).generate(ctx.references.toArray).asInstanceOf[Predicate] (r: InternalRow) => p.eval(r) } }
pronix/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GeneratePredicate.scala
Scala
apache-2.0
2,474
/* * Copyright 2013 Stephan Rehfeld * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package test.scaladelray.math import org.scalatest.FunSpec import scaladelray.math.{Vector3, Normal3} import scaladelray.Constants /** * A behavior test of [[scaladelray.math.Normal3]]. * * @author Stephan Rehfeld */ class Normal3Spec extends FunSpec { describe( "A Normal3") { it( "should set the parameters for x, y, and z correctly" ) { val x = 3 val y = 4 val z = 5 val normal = Normal3( x, y, z ) assert( normal.x == x ) assert( normal.y == y ) assert( normal.z == z ) } it( "should be comparable" ) { val normal1 = Normal3( 2, 3, 5 ) val normal2 = Normal3( 2, 3, 5 ) assert( normal1 == normal2 ) } it( "should compute the product with a scalar correctly" ) { val normal = Normal3( 2, 3, 5 ) val v = 10 assert( normal * v == Normal3( 2 * v, 3 * v, 5 * v ) ) } it( "should compute the sum with another normal correctly" ) { val normal1 = Normal3( 2, 3, 5 ) val normal2 = Normal3( 7, 11, 13 ) assert( normal1 + normal2 == Normal3( 2+7, 3+11, 5+13 ) ) } it( "should compute the dot product with Vector3 correctly" ) { val normal = Normal3( 2, 3, 5 ) val vector = Vector3( 7, 11, 13 ) assert( (normal dot vector) == 2 * 7 + 3 * 11 + 5 * 13 ) } it( "should be convertible to a Vector3 with the same values for x, z, and y" ) { val normal = Normal3( 2, 3, 5 ) val vector = normal.asVector assert( normal.x == vector.x ) assert( normal.y == vector.y ) assert( normal.z == vector.z ) } it( "should not be altered after the multiplication with a scalar") { val normal = Normal3( 2, 3, 5 ) val v = 10 normal * v assert( normal.x == 2 ) assert( normal.y == 3 ) assert( normal.z == 5 ) } it( "should not be altered after the addition with another Normal3") { val normal1 = Normal3( 2, 3, 5 ) val normal2 = Normal3( 7, 11, 13 ) normal1 + normal2 assert( normal1.x == 2 ) assert( normal1.y == 3 ) assert( normal1.z == 5 ) } it( "should not be altered after being added to another Normal3") { val normal1 = Normal3( 2, 3, 5 ) val normal2 = Normal3( 7, 11, 13 ) normal1 + normal2 assert( normal2.x == 7 ) assert( normal2.y == 11 ) assert( normal2.z == 13 ) } it( "should not be altered after calculating the dot product with a Vector3") { val normal = Normal3( 2, 3, 5 ) val vector = Vector3( 7, 11, 13 ) normal dot vector assert( normal.x == 2 ) assert( normal.y == 3 ) assert( normal.z == 5 ) } it( "should not alter the Vector3 while calculating the dot product") { val normal = Normal3( 2, 3, 5 ) val vector = Vector3( 7, 11, 13 ) normal dot vector assert( vector.x == 7 ) assert( vector.y == 11 ) assert( vector.z == 13 ) } it( "should not be altered while converted to a Vector3") { val normal = Normal3( 2, 3, 5 ) normal.asVector assert( normal.x == 2 ) assert( normal.y == 3 ) assert( normal.z == 5 ) } it( "should respect the operator orders (* before +)") { val normal1 = Normal3( 2, 3, 5 ) val normal2 = Normal3( 7, 11, 13 ) val v = 10 assert( normal1 + normal2 * v == normal2 * v + normal1 ) } it( "should have a working unary - operator" ) { val normal = Normal3( 2, 3, 5 
) assert( -normal == Normal3( -2, -3, -5 ) ) } it( "should have a roughly-equals (=~=) operator that compares two normals within the tolerance defined by Constants.EPSILON") { val n = Normal3( 0, 0, 0 ) assert( n =~= n ) assert( n =~= Normal3( n.x + Constants.EPSILON, n.y, n.z ) ) assert( n =~= Normal3( n.x - Constants.EPSILON, n.y, n.z ) ) assert( n =~= Normal3( n.x, n.y + Constants.EPSILON, n.z ) ) assert( n =~= Normal3( n.x, n.y - Constants.EPSILON, n.z ) ) assert( n =~= Normal3( n.x, n.y, n.z + Constants.EPSILON ) ) assert( n =~= Normal3( n.x, n.y, n.z - Constants.EPSILON ) ) } } }
stephan-rehfeld/scaladelray
src/test/scala/test/scaladelray/math/Normal3Spec.scala
Scala
apache-2.0
4,780
package org.jetbrains.plugins.scala package lang.navigation import org.jetbrains.plugins.scala.base.ScalaLightPlatformCodeInsightTestCaseAdapter import com.intellij.ide.util.gotoByName.GotoClassModel2 /** * @author Alefas * @since 23.12.13 */ class GoToClassTest extends ScalaLightPlatformCodeInsightTestCaseAdapter { protected override def rootPath(): String = baseRootPath() + "navigation/gotoclass/" def testTrait() { val gotoModel = new GotoClassModel2(getProjectAdapter) val length: Int = gotoModel.getElementsByName("GoToClassSimpleTrait", false, "GoToClassSimpleT").length assert(length == 1, s"Number of SimpleTraits is $length...") } def testObject() { val gotoModel = new GotoClassModel2(getProjectAdapter) val length: Int = gotoModel.getElementsByName("GoToClassSimpleObject$", false, "GoToClassSimpleO").length assert(length == 1, s"Number of SimpleObjects$$ is $length...") val length2: Int = gotoModel.getElementsByName("GoToClassSimpleObject", false, "GoToClassSimpleO").length assert(length2 == 0, s"Number of SimpleObjects is $length2...") } }
consulo/consulo-scala
test/org/jetbrains/plugins/scala/lang/navigation/GoToClassTest.scala
Scala
apache-2.0
1,110
package edu.gemini.model.p1.immutable /** * Identifies whether adaptive optics are used and if so whether * natural or laser guide stars are used. */ sealed trait AoPerspective { def toBoolean: Boolean = true } case object AoNone extends AoPerspective { override def toBoolean: Boolean = false } case object AoLgs extends AoPerspective case object AoNgs extends AoPerspective
arturog8m/ocs
bundle/edu.gemini.model.p1/src/main/scala/edu/gemini/model/p1/immutable/AoPerspective.scala
Scala
bsd-3-clause
388
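A hypothetical usage sketch (not part of the arturog8m/ocs repository), assuming only the three AoPerspective cases shown above; it illustrates exhaustive matching over the sealed trait.

import edu.gemini.model.p1.immutable._

object AoPerspectiveExample {
  // Map each adaptive-optics perspective to a human-readable label.
  def label(ao: AoPerspective): String = ao match {
    case AoNone => "no adaptive optics"
    case AoLgs  => "adaptive optics with laser guide star"
    case AoNgs  => "adaptive optics with natural guide star"
  }

  def main(args: Array[String]): Unit = {
    // toBoolean is false only for AoNone, as defined above.
    assert(!AoNone.toBoolean && AoLgs.toBoolean && AoNgs.toBoolean)
    println(Seq(AoNone, AoLgs, AoNgs).map(label).mkString(", "))
  }
}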
/* * Copyright 2018 Analytics Zoo Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.zoo.pipeline.api.autograd import com.intel.analytics.bigdl.nn.Graph.ModuleNode import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity} import com.intel.analytics.bigdl.nn.keras.KerasLayer import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric import com.intel.analytics.bigdl.utils.{MultiShape, Shape, SingleShape} import com.intel.analytics.zoo.pipeline.api.Net import com.intel.analytics.zoo.pipeline.api.keras.layers.utils.KerasUtils import com.intel.analytics.zoo.pipeline.api.keras.models.Model import scala.reflect.ClassTag private[zoo] class LambdaTorch[T: ClassTag](val graph: Model[T])( implicit ev: TensorNumeric[T]) extends AbstractModule[Activity, Activity, T] { override def updateOutput(input: Activity): Activity = { output = graph.updateOutput(input) output } override def updateGradInput(input: Activity, gradOutput: Activity): Activity = { gradInput = graph.updateGradInput(input, gradOutput) gradInput } override def accGradParameters(input: Activity, gradOutput: Activity): Unit = { graph.accGradParameters(input, gradOutput) } } private[zoo] class Lambda[T: ClassTag](val func: (List[Variable[T]]) => Variable[T], inputShape: Shape = null)( implicit ev: TensorNumeric[T]) { def getInputShape(): Shape = inputShape def inputs(nodes : ModuleNode[T]*): ModuleNode[T] = { val inputShape = Shape(nodes.map {node => node.element.getOutputShape() }.toList) val lambda = this.create(KerasUtils.removeBatch(inputShape)) lambda.inputs(nodes : _*) } def inputs(nodes : Array[ModuleNode[T]]): ModuleNode[T] = { this.inputs(nodes : _*) } // There's no batch in the inputShape def create(inputShape: Shape): LambdaLayer[T] = { val inputs = inputShape match { case s: SingleShape => List(Variable[T](s)) case m: MultiShape => m.value.map(s => Variable[T](s)) } LambdaLayer[T](inputs.toArray, outVar = func(inputs), inputShape) } } object Lambda { def apply[T: ClassTag](func: (List[Variable[T]]) => Variable[T], inputShape: Shape = null)( implicit ev: TensorNumeric[T]): Lambda[T] = { new Lambda(func, inputShape) } } object LambdaLayer { def apply[T: ClassTag](inputs: Array[Variable[T]], outVar: Variable[T], inputShape: Shape)(implicit ev: TensorNumeric[T]): LambdaLayer[T] = { new LambdaLayer[T](outVar.toGraph(inputs), inputShape) } } class LambdaLayer[T: ClassTag] private (val graph: Model[T], val inputShape: Shape = null)(implicit ev: TensorNumeric[T]) extends KerasLayer[Activity, Activity, T](KerasUtils.addBatch(inputShape)) with Net { override def computeOutputShape(inputShape: Shape): Shape = { graph.getOutputShape() } override def doBuild(inputShape: Shape): LambdaTorch[T] = { new LambdaTorch[T](graph) } }
intel-analytics/analytics-zoo
zoo/src/main/scala/com/intel/analytics/zoo/pipeline/api/autograd/Lambda.scala
Scala
apache-2.0
3,489
package org.broadinstitute.dsde.firecloud.page import org.broadinstitute.dsde.firecloud.component.{Link, TestId} import org.broadinstitute.dsde.firecloud.page.library.DataLibraryPage import org.broadinstitute.dsde.firecloud.page.methodrepo.MethodRepoPage import org.broadinstitute.dsde.firecloud.page.workspaces.WorkspaceListPage import org.openqa.selenium.WebDriver abstract class BaseFireCloudPage(implicit webDriver: WebDriver) extends AuthenticatedPage { def goToWorkspaces(): WorkspaceListPage = { Link(TestId("workspace-nav-link")).doClick() await ready new WorkspaceListPage } def goToDataLibrary(): DataLibraryPage = { Link(TestId("library-nav-link")).doClick() await ready new DataLibraryPage } def goToMethodRepository(): MethodRepoPage = { Link(TestId("method-repo-nav-link")).doClick() await ready new MethodRepoPage() } }
broadinstitute/firecloud-ui
automation/src/test/scala/org/broadinstitute/dsde/firecloud/page/BaseFireCloudPage.scala
Scala
bsd-3-clause
876
package com.waywardcode.math import java.math.BigInteger object FastFib { private val TWO = BigInteger.valueOf(2L) private def recFib(n: Int) : Tuple2[BigInteger,BigInteger] = { if(n == 0) { return (BigInteger.ZERO, BigInteger.ONE) } val (a,b) = recFib(n/2) val c = ((b multiply TWO) subtract a) multiply a val d = (a multiply a) add (b multiply b) (n & 1) match { case 0 => (c,d) case _ => (d, (c add d)) } } def apply(n: Int): BigInteger = recFib(n)._1 }
waywardcoder/small_programs
fast_fibo/scala_version/fibs.scala
Scala
gpl-2.0
536
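A hypothetical usage sketch (not in the waywardcoder/small_programs repository) for the fast-doubling recursion above: recFib(n) returns the pair (F(n), F(n+1)) via the identities F(2k) = F(k) * (2*F(k+1) - F(k)) and F(2k+1) = F(k)^2 + F(k+1)^2, so apply(n) yields F(n) with F(0) = 0.

import com.waywardcode.math.FastFib

object FastFibExample {
  def main(args: Array[String]): Unit = {
    // First eleven Fibonacci numbers: 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55
    println((0 to 10).map(FastFib(_)).mkString(", "))
    // Only O(log n) recursive steps are needed, so large indices stay cheap.
    println(FastFib(100))  // 354224848179261915075
  }
}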
/* * Copyright (c) 2014, Cloudera, Inc. All Rights Reserved. * * Cloudera, Inc. licenses this file to you under the Apache License, * Version 2.0 (the "License"). You may not use this file except in * compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * This software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for * the specific language governing permissions and limitations under the * License. */ package com.cloudera.oryx.api.speed import org.apache.hadoop.conf.Configuration import org.apache.spark.rdd.RDD import com.cloudera.oryx.api.KeyMessage /** * Scala counterpart to Java SpeedModelManager. * * @tparam K type of key read from input topic * @tparam M type of message read from input topic * @tparam U type of update message read/written */ trait ScalaSpeedModelManager[K,M,U] { /** * Called by the framework to initiate a continuous process of reading models, and reading * from the input topic and updating model state in memory, and issuing updates to the * update topic. This will be executed asynchronously and may block. * * @param updateIterator iterator to read models from * @param hadoopConf Hadoop context, which may be required for reading from HDFS */ def consume(updateIterator: Iterator[KeyMessage[String,U]], hadoopConf: Configuration): Unit /** * @param newData RDD of raw new data from the topic * @return updates to publish on the update topic */ def buildUpdates(newData: RDD[(K,M)]): Iterable[U] def close(): Unit }
jordancheah/oryx
framework/oryx-api/src/main/scala/com/cloudera/oryx/api/speed/ScalaSpeedModelManager.scala
Scala
apache-2.0
1,671
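A hypothetical, minimal implementation sketch (not part of the jordancheah/oryx repository) that satisfies the ScalaSpeedModelManager contract above by draining the update stream and publishing nothing; it assumes only the signatures shown in the trait.

import org.apache.hadoop.conf.Configuration
import org.apache.spark.rdd.RDD
import com.cloudera.oryx.api.KeyMessage
import com.cloudera.oryx.api.speed.ScalaSpeedModelManager

class NoOpSpeedModelManager extends ScalaSpeedModelManager[String, String, String] {
  // Read and discard every model/update message from the update topic.
  override def consume(updateIterator: Iterator[KeyMessage[String, String]],
                       hadoopConf: Configuration): Unit =
    updateIterator.foreach(_ => ())

  // Publish no updates for newly arrived input data.
  override def buildUpdates(newData: RDD[(String, String)]): Iterable[String] =
    Iterable.empty

  override def close(): Unit = ()
}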
package ca.uwo.eng.sel.cepsim.query import ca.uwo.eng.sel.cepsim.gen.Generator import org.junit.runner.RunWith import org.mockito.Mockito._ import org.scalatest.{FlatSpec, Matchers} import org.scalatest.junit.JUnitRunner import org.scalatest.mock.MockitoSugar @RunWith(classOf[JUnitRunner]) class QueryTest extends FlatSpec with Matchers with MockitoSugar { trait Fixture { val generator = mock[Generator] val prod1 = EventProducer("p1", 10, generator) val cons1 = EventConsumer("c1", 10) val f1 = Operator("f1", 1000) var q = new Query("q1") q addVertices(prod1, f1, cons1) } "A query" should "correctly connect the vertices of the DAG" in new Fixture { q addEdge (prod1, f1) q addEdge (f1, cons1) q.vertices.size should be (3) q successors(prod1) should be (Set(f1)) q successors(f1) should be (Set(cons1)) q successors(cons1) should be(Set.empty) } it should "allow multiple output per operators" in new Fixture { val s1 = Operator("s1", 10000) val f2 = Operator("f2", 10000) val m1 = Operator("m1", 10000) q addVertices(prod1, f1, cons1) q addVertices(s1, f2, m1) q addEdges((prod1, s1, 1.0), (s1, f1, 1.0), (s1, f2, 1.0), (f1, m1, 1.0), (f2, m1, 1.0), (m1, cons1, 1.0)) q.vertices.size should be (6) q successors(s1) should be (Set(f1, f2)) q successors(f1) should be (Set(m1)) } it should "return detailed connections information" in new Fixture { val s1 = Operator("s1", 10000) val f2 = Operator("f2", 10000) val m1 = Operator("m1", 10000) q addVertices(prod1, f1, cons1) q addVertices(s1, f2, m1) q addEdges((prod1, s1, 1.0), (s1, f1, 1.0), (s1, f2, 1.0), (f1, m1, 1.0), (f2, m1, 1.0), (m1, cons1, 1.0)) q.edges(prod1) should have size (1) q.edges(prod1) should contain theSameElementsAs Set(Edge(prod1, s1, 1.0)) q.edges(s1) should have size (2) q.edges(s1) should contain theSameElementsAs Set(Edge(s1, f1, 1.0), Edge(s1, f2, 1.0)) //q.edges(s1) should be q.edges(f1) should have size (1) q.edges(f1) should contain theSameElementsAs Set(Edge(f1, m1, 1.0)) q.edges(f2) should have size (1) q.edges(f2) should contain theSameElementsAs Set(Edge(f2, m1, 1.0)) q.edges(m1) should have size (1) q.edges(m1) should contain theSameElementsAs Set(Edge(m1, cons1, 1.0)) } }
virsox/cepsim
cepsim-core/src/test/scala/ca/uwo/eng/sel/cepsim/query/QueryTest.scala
Scala
mit
2,391
package dsorting.states import dsorting.future.Subscription import dsorting.messaging.{ChannelTable, MessageListener} import dsorting.primitive.PartitionTable import scala.concurrent.Future trait State[T] { val listener: MessageListener val listenerSubscription: Subscription def run(): Future[T] } trait ConnectedWorkers { val partitionTable: PartitionTable val channelTable: ChannelTable }
Qwaz/scala-distributed-sorting
src/main/scala/dsorting/states/State.scala
Scala
apache-2.0
406
package lv.addresses.indexer trait SpatialIndexer { this: AddressFinder => import scala.collection.mutable.SortedSet case class Node(code: Int, left: Node, right: Node) protected var _spatialIndex: Node = null class Search(val limit: Int) { private val nearest = SortedSet[(AddrObj, BigDecimal)]()(new Ordering[(AddrObj, BigDecimal)] { def compare(a: (AddrObj, BigDecimal), b: (AddrObj, BigDecimal)) = if (a._2 < b._2) -1 else if (a._2 > b._2) 1 else 0 }) private def dist(px: BigDecimal, py: BigDecimal, ax: BigDecimal, ay: BigDecimal) = (px - ax).pow(2) + (py - ay).pow(2) def searchNearest(coordX: BigDecimal, coordY: BigDecimal) = { val found = scala.collection.mutable.Set[AddrObj]() def search(node: Node, depth: Int = 0): AddrObj = if (node == null) null else { import node._ val curr_addr = addressMap(code) def closest(addr: AddrObj, new_addr: AddrObj) = if (new_addr == null || found(new_addr)) if (found(addr)) null else addr else if (addr == null || found(addr)) new_addr else if (dist(coordX, coordY, addr.coordX, addr.coordY) < dist(coordX, coordY, new_addr.coordX, new_addr.coordY)) addr else new_addr def check_x_splitting_pane(addr: AddrObj) = addr == null || dist(coordX, coordY, addr.coordX, addr.coordY) >= dist(coordX, 0, curr_addr.coordX, 0) def check_y_splitting_pane(addr: AddrObj) = addr == null || dist(coordX, coordY, addr.coordX, addr.coordY) >= dist(0, coordY, 0, curr_addr.coordY) def traverse(left: Node, right: Node, check_splitting_pane_cross: AddrObj => Boolean, start_with_left: Boolean) = { val (first, second) = if (start_with_left) (left, right) else (right, left) val curr_best_addr = closest(curr_addr, search(first, depth + 1)) if (check_splitting_pane_cross(curr_best_addr)) closest(curr_best_addr, search(second, depth + 1)) else curr_best_addr } if (depth % 2 == 0) traverse(left, right, check_x_splitting_pane, coordX <= curr_addr.coordX) //x axis else traverse(left, right, check_y_splitting_pane, coordY <= curr_addr.coordY) //y axis } nearest.clear() 1 to limit map { _ => val nearest_addr = search(_spatialIndex) found += nearest_addr nearest_addr } foreach (a => nearest += (a -> dist(coordX, coordY, a.coordX, a.coordY))) nearest.toList } def searchNearestFullScan(coordX: BigDecimal, coordY: BigDecimal) = { nearest.clear() addressMap.foreach { case (c, o) => if (o.coordX != null && o.coordY != null) { nearest += (o -> dist(coordX, coordY, o.coordX, o.coordY)) if (nearest.size > limit) nearest.lastOption.foreach(nearest -= _) } } nearest.toList } } def spatialIndex(addressMap: Map[Int, AddrObj]) = { var nodeCount = 0 def kdtree(addresses: Seq[Int], depth: Int = 0): Node = addresses match { case Seq() => null case Seq(c) => nodeCount += 1 Node(c, null, null) case _ => val axis = depth % 2 val sorted = addresses.sortBy { c => val a = addressMap(c) if (axis == 0) a.coordX else a.coordY } val median = sorted.size / 2 nodeCount += 1 Node( sorted(median), kdtree(sorted.slice(0, median), depth + 1), kdtree(sorted.slice(median, sorted.size), depth + 1) ) } logger.info("Creating spatial index ... ") _spatialIndex = kdtree(addressMap .keysIterator .filter(c => addressMap .get(c) .exists { a => a.coordX != null && a.coordY != null }) .toIndexedSeq) logger.info(s"Spatial index created ($nodeCount addresses indexed).") } }
mrumkovskis/addresses
indexer/src/main/scala/SpatialIndexer.scala
Scala
mit
3,876
package org.scalaide.core.lexical import org.eclipse.jface.text.rules.Token import org.eclipse.jface.text.rules.ITokenScanner import org.scalaide.ui.syntax.ScalaSyntaxClass import org.eclipse.jface.util.PropertyChangeEvent import org.eclipse.jface.preference.IPreferenceStore /** Base class for Scala specific token scanners. * * @see org.scalaide.core.lexical.ScalaCodeScanners. */ trait AbstractScalaScanner extends ITokenScanner { /** Updates the UI configuration for the tokens managed by this scanner, * according to the new preferences. */ def adaptToPreferenceChange(event: PropertyChangeEvent) = for ((syntaxClass, token) <- tokens) token.setData(getTextAttribute(syntaxClass)) /** Returns the preference store used to configure the tokens managed by * this scanner. */ protected def preferenceStore: IPreferenceStore /** Returns the token corresponding to the given [[ScalaSyntaxClass]]. */ protected def getToken(syntaxClass: ScalaSyntaxClass): Token = tokens.getOrElse(syntaxClass, createToken(syntaxClass)) private var tokens: Map[ScalaSyntaxClass, Token] = Map() private def createToken(syntaxClass: ScalaSyntaxClass) = { val token = new Token(getTextAttribute(syntaxClass)) tokens = tokens + (syntaxClass -> token) token } private def getTextAttribute(syntaxClass: ScalaSyntaxClass) = syntaxClass.getTextAttribute(preferenceStore) }
Kwestor/scala-ide
org.scala-ide.sdt.core/src/org/scalaide/core/lexical/AbstractScalaScanner.scala
Scala
bsd-3-clause
1,423
package com.stovokor.editor.state import com.simsilica.lemur.input.StateFunctionListener import com.stovokor.util.EditorEventListener import com.stovokor.util.EditorEvent import com.jme3.app.state.AppStateManager import com.stovokor.editor.input.InputFunction import com.stovokor.util.ExportMap import com.stovokor.util.EventBus import com.jme3.app.Application import com.stovokor.util.ChangeMaterial import com.stovokor.editor.model.repository.BorderRepository import com.stovokor.editor.model.repository.SectorRepository import com.stovokor.util.SectorSurfaceMutator import com.stovokor.editor.gui.GuiFactory import com.simsilica.lemur.Container import com.stovokor.editor.model.repository.Repositories import com.stovokor.editor.model.SimpleMaterial import com.stovokor.editor.model.SurfaceMaterial import com.stovokor.editor.model.MatDefMaterial import com.stovokor.util.EditSettings import com.stovokor.editor.model.Settings import com.stovokor.util.SettingsUpdated import java.io.File import java.io.FileFilter import com.jme3.asset.plugins.FileLocator class SettingsLoaderState extends BaseState with EditorEventListener { val settingsRepository = Repositories.settingsRepository val materialRepository = Repositories.materialRepository override def initialize(stateManager: AppStateManager, simpleApp: Application) { super.initialize(stateManager, simpleApp) EventBus.subscribe(this, SettingsUpdated()) reloadSettings() } override def cleanup() { super.cleanup EventBus.removeFromAll(this) } def onEvent(event: EditorEvent) = event match { case SettingsUpdated() => reloadSettings() case _ => } def reloadSettings() { val settings = settingsRepository.get() reloadMaterials(settings.assetsBasePath) } def reloadMaterials(path: String) { val dir = new File(path) if (dir.exists() && dir.isDirectory()) { assetManager.registerLocator(dir.getPath, classOf[FileLocator]) loadSimpleTextures(dir) loadMaterialDefinitions(dir) } else { println(s"Error: cannot load settings $path") } } def loadRecursively(baseDir: File, dir: File, filter: FileFilter, builder: String => SurfaceMaterial) { if (dir.exists() && dir.isDirectory()) { dir.listFiles(filter).foreach(file => { val relPath = baseDir.toPath.relativize(file.toPath) materialRepository.add(builder(relPath.toString)) }) dir.listFiles(f => f.isDirectory) .foreach(f => loadRecursively(baseDir, f, filter, builder)) } } def loadSimpleTextures(dir: File) { // TODO accept more file types val extensions = Set(".png", ".jpg", ".bmp") def isImage(file: File) = file.isFile() && extensions.find(file.getName.endsWith).isDefined loadRecursively(dir, new File(dir, "Textures"), isImage, SimpleMaterial(_)) } def loadMaterialDefinitions(dir: File) { def isMatDef(file: File) = file.isFile() && file.getName.endsWith(".j3m") loadRecursively(dir, new File(dir, "Materials"), isMatDef, MatDefMaterial(_)) } }
jcfandino/leveleditor
src/main/scala/com/stovokor/editor/state/SettingsLoaderState.scala
Scala
bsd-3-clause
3,068
package water.sparkling.itest.local import hex.deeplearning.DeepLearning import hex.deeplearning.DeepLearningParameters import org.apache.spark.SparkContext import org.apache.spark.examples.h2o.Airlines import org.apache.spark.h2o.H2OContext import org.apache.spark.rdd.RDD import org.apache.spark.sql.SQLContext import org.junit.runner.RunWith import org.scalatest.FunSuite import org.scalatest.junit.JUnitRunner import water.app.SparkContextSupport import water.fvec.H2OFrame import water.sparkling.itest.{LocalTest, SparkITest} /** * PUBDEV-928 test suite. * * Verifies that DL can be run on 0-length chunks. */ @RunWith(classOf[JUnitRunner]) class PubDev928TestSuite extends FunSuite with SparkITest { test("Verify scoring on 0-length chunks", LocalTest) { launch("water.sparkling.itest.local.PubDev928Test", env { sparkMaster("local-cluster[3,2,1024]") } ) } } object PubDev928Test extends SparkContextSupport { def main(args: Array[String]): Unit = { val conf = configure("PUBDEV-928") val sc = new SparkContext(conf) val h2oContext = new H2OContext(sc).start() import h2oContext._ val sqlContext = new SQLContext(sc) import sqlContext.implicits._ val airlinesData = new H2OFrame(new java.io.File("examples/smalldata/allyears2k_headers.csv.gz")) val airlinesTable : RDD[Airlines] = asRDD[Airlines](airlinesData) airlinesTable.toDF.registerTempTable("airlinesTable") val query = "SELECT * FROM airlinesTable WHERE Dest LIKE 'SFO'" val result: H2OFrame = sqlContext.sql(query) // Using a registered context and table val train: H2OFrame = result('Year, 'Month, 'DayofMonth, 'DayOfWeek, 'CRSDepTime, 'CRSArrTime, 'UniqueCarrier, 'FlightNum, 'TailNum, 'CRSElapsedTime, 'Origin, 'Dest, 'Distance, 'IsDepDelayed ) //train.replace(train.numCols()-1, train.lastVec().toEnum) println(train.lastVec().naCnt()) // Configure Deep Learning algorithm val dlParams = new DeepLearningParameters() dlParams._train = train dlParams._response_column = 'IsDepDelayed val dl = new DeepLearning(dlParams) val dlModel = dl.trainModel.get // THIS WILL FAIL val testFrame : H2OFrame = result // Verify that testFrame has at least on chunk with 0-rows val av = testFrame.anyVec(); assert( (0 until av.nChunks()).exists(idx => av.chunkForChunkIdx(idx).len() == 0), "At least on chunk with 0-rows has to exist!") val predictionH2OFrame = dlModel.score(testFrame)('predict) assert(predictionH2OFrame.numRows() == testFrame.numRows()) } }
tromika/sparkling-water
examples/src/integTest/scala/water/sparkling/itest/local/PubDev928TestSuite.scala
Scala
apache-2.0
2,586
package com.nidkil.kvk.model import com.nidkil.util.Util import play.api.libs.json._ import play.api.libs.functional.syntax._ case class Organisatie( var handelsnaam: String, var statutaireNaam: String, var bestaandeHandelsnamen: Option[Array[String]] = None, var vervallenHandelsnamen: Option[Array[String]] = None, var kvkNummer: String, var vestigingsnummer: Option[String] = None, var hoofdvestiging: Boolean = false, var rechtspersoon: Boolean = false, var samenwerkingsverband: Option[String] = None, var adres: Option[Adres] = None, var status: Option[String] = None) { private def getHandelsnamenAsString() = { bestaandeHandelsnamen.getOrElse(Array()).mkString("; ") } private def getVervallenHandelsnamenAsString() = { vervallenHandelsnamen.getOrElse(Array()).mkString("; ") } override def toString(): String = Util.template("handelsnaam=", Some(handelsnaam), ", ") + Util.template("statutaireNaam=", Some(statutaireNaam), ", ") + Util.template("bestaandeHandelsnamen=", Some(getHandelsnamenAsString()), ", ") + Util.template("vervallenHandelsnamen=", Some(getVervallenHandelsnamenAsString()), ", ") + Util.template("kvkNummer=", Some(kvkNummer), ", ") + Util.template("vestigingsnummer=", vestigingsnummer, ", ") + Util.template("hoofdvestiging=", Some(hoofdvestiging), ", ") + Util.template("rechtspersoon=", Some(rechtspersoon), ", ") + Util.template("samenwerkingsverband=", samenwerkingsverband, ", ") + Util.template("adres=", adres, ", ") + Util.template("status=", status, "") } object Organisatie { val organisatieWrites: Writes[Organisatie] = ( (JsPath \\ "handelsnaam").write[String] and (JsPath \\ "statutaireNaam").write[String] and (JsPath \\ "bestaandeHandelsnamen").writeNullable[Array[String]] and (JsPath \\ "vervallenHandelsnamen").writeNullable[Array[String]] and (JsPath \\ "kvkNummer").write[String] and (JsPath \\ "vestigingsnummer").writeNullable[String] and (JsPath \\ "hoofdvestiging").write[Boolean] and (JsPath \\ "rechtspersoon").write[Boolean] and (JsPath \\ "samenwerkingsverband").writeNullable[String] and (JsPath \\ "adres").writeNullable[Adres] and (JsPath \\ "status").writeNullable[String])(unlift(Organisatie.unapply)) val organisatieReads: Reads[Organisatie] = ( (JsPath \\ "handelsnaam").read[String] and (JsPath \\ "statutaireNaam").read[String] and (JsPath \\ "bestaandeHandelsnamen").readNullable[Array[String]] and (JsPath \\ "vervallenHandelsnamen").readNullable[Array[String]] and (JsPath \\ "kvkNummer").read[String] and (JsPath \\ "vestigingsnummer").readNullable[String] and (JsPath \\ "hoofdvestiging").read[Boolean] and (JsPath \\ "rechtspersoon").read[Boolean] and (JsPath \\ "samenwerkingsverband").readNullable[String] and (JsPath \\ "adres").readNullable[Adres] and (JsPath \\ "status").readNullable[String])(Organisatie.apply _) implicit val organisatiesFormat: Format[Organisatie] = Format(organisatieReads, organisatieWrites) }
nidkil/scala-kvk-web-scraper
src/main/scala/com/nidkil/kvk/model/Organisatie.scala
Scala
apache-2.0
3,053
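A hypothetical usage sketch (not part of the nidkil/scala-kvk-web-scraper repository): constructing an Organisatie with only the required fields, relying on the defaults and the custom toString defined above. The implicit Format[Organisatie] in the companion object is what play-json's Json.toJson and as[Organisatie] would resolve.

import com.nidkil.kvk.model.Organisatie

object OrganisatieExample {
  def main(args: Array[String]): Unit = {
    // Only the three fields without defaults are supplied; the rest fall back
    // to None/false as declared in the case class.
    val org = Organisatie(
      handelsnaam = "Voorbeeld BV",
      statutaireNaam = "Voorbeeld Beheer B.V.",
      kvkNummer = "12345678")
    println(org)  // rendered by the custom toString above
  }
}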
package org.jetbrains.plugins.scala package lang package psi package impl package base package types import com.intellij.lang.ASTNode import com.intellij.psi._ import com.intellij.psi.scope.PsiScopeProcessor import org.jetbrains.plugins.scala.lang.psi.api.ScalaElementVisitor import org.jetbrains.plugins.scala.lang.psi.api.base.types._ import org.jetbrains.plugins.scala.lang.psi.api.statements.ScTypeAliasDefinition import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypeParametersOwner import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory.createTypeElementFromText import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.ScSyntheticClass import org.jetbrains.plugins.scala.lang.psi.types._ import org.jetbrains.plugins.scala.lang.psi.types.api.Any import org.jetbrains.plugins.scala.lang.psi.types.result.{Failure, TypeResult, TypingContext} import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveResult import org.jetbrains.plugins.scala.macroAnnotations.{Cached, ModCount} import scala.annotation.tailrec /** * @author Alexander Podkhalyuzin, ilyas */ class ScParameterizedTypeElementImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScParameterizedTypeElement { override def desugarizedText: String = { val inlineSyntaxIds = Set("?", "+?", "-?") def kindProjectorFunctionSyntax(elem: ScTypeElement): String = { def convertParameterized(param: ScParameterizedTypeElement): String = { param.typeElement.getText match { case v@("+" | "-") => //λ[(-[A], +[B]) => Function2[A, Int, B]] param.typeArgList.typeArgs match { case Seq(simple) => v ++ simple.getText case _ => "" //should have only one type arg } case _ => param.getText //it's a higher kind type } } def convertSimpleType(simple: ScSimpleTypeElement) = simple.getText.replaceAll("`", "") elem match { case fun: ScFunctionalTypeElement => fun.returnTypeElement match { case Some(ret) => val typeName = "Λ$" val paramText = fun.paramTypeElement match { case tuple: ScTupleTypeElement => val paramList = tuple.components.map { case parameterized: ScParameterizedTypeElement => convertParameterized(parameterized) case simple: ScSimpleTypeElement => convertSimpleType(simple) case _ => return null //something went terribly wrong } paramList.mkString(sep = ", ") case simple: ScSimpleTypeElement => simple.getText.replaceAll("`", "") case parameterized: ScParameterizedTypeElement => convertParameterized(parameterized) case _ => return null } s"({type $typeName[$paramText] = ${ret.getText}})#$typeName" case _ => null } case _ => null } } def kindProjectorInlineSyntax(e: PsiElement) = { def generateName(i: Int): String = { //kind projector generates names the same way val res = ('α' + (i % 25)).toChar.toString if (i < 25) res else res + (i / 25) } val (paramOpt: Seq[Option[String]], body: Seq[String]) = typeArgList.typeArgs.zipWithIndex.map { case (simple: ScSimpleTypeElement, i) if inlineSyntaxIds.contains(simple.getText) => val name = generateName(i) (Some(simple.getText.replace("?", name)), name) case (param: ScParameterizedTypeElement, i) if inlineSyntaxIds.contains(param.typeElement.getText) => val name = generateName(i) (Some(param.getText.replace("?", name)), name) case (a, _) => (None, a.getText) }.unzip val paramText = paramOpt.flatten.mkString(start = "[", sep = ", ", end = "]") val bodyText = body.mkString(start = "[", sep = ", ", end = "]") s"({type ${"Λ$"}$paramText = ${typeElement.getText}$bodyText})#${"Λ$"}" } def existentialType = { val forSomeBuilder = new StringBuilder var count = 1
forSomeBuilder.append(" forSome {") val typeElements = typeArgList.typeArgs.map { case w: ScWildcardTypeElement => forSomeBuilder.append("type _" + "$" + count + w.lowerTypeElement.fold("")(te => s" >: ${te.getText}") + w.upperTypeElement.fold("")(te => s" <: ${te.getText}")) forSomeBuilder.append("; ") val res = s"_$$$count" count += 1 res case t => t.getText } forSomeBuilder.delete(forSomeBuilder.length - 2, forSomeBuilder.length) forSomeBuilder.append("}") s"(${typeElement.getText}${typeElements.mkString("[", ", ", "]")} ${forSomeBuilder.toString()})" } val kindProjectorEnabled = ScalaPsiUtil.kindProjectorPluginEnabled(this) def isKindProjectorFunctionSyntax(element: PsiElement): Boolean = { typeElement.getText match { case "Lambda" | "Ξ»" if kindProjectorEnabled => true case _ => false } } @tailrec def isKindProjectorInlineSyntax(element: PsiElement): Boolean = { element match { case simple: ScSimpleTypeElement if kindProjectorEnabled && inlineSyntaxIds.contains(simple.getText) => true case parametrized: ScParameterizedTypeElement if kindProjectorEnabled => isKindProjectorInlineSyntax(parametrized.typeElement) case _ => false } } typeArgList.typeArgs.find { case e: ScFunctionalTypeElement if isKindProjectorFunctionSyntax(e) => true case e if isKindProjectorInlineSyntax(e) => true case _: ScWildcardTypeElementImpl => true case _ => false } match { case Some(fun) if isKindProjectorFunctionSyntax(fun) => kindProjectorFunctionSyntax(fun) case Some(e) if isKindProjectorInlineSyntax(e) => kindProjectorInlineSyntax(e) case Some(_) => existentialType case _ => null } } //computes desugarized type either for existential type or one of kind projector types @Cached(ModCount.getBlockModificationCount, this) override def computeDesugarizedType: Option[ScTypeElement] = Option(desugarizedText) match { case Some(text) => Option(createTypeElementFromText(text, getContext, this)) case _ => None } override protected def innerType: TypeResult[ScType] = { computeDesugarizedType match { case Some(typeElement) => return typeElement.getType() case _ => } val tr = typeElement.getType() val res = tr.getOrElse(return tr) //todo: possible refactoring to remove parameterized type inference in simple type typeElement match { case s: ScSimpleTypeElement => s.reference match { case Some(ref) => if (ref.isConstructorReference) { ref.resolveNoConstructor match { case Array(ScalaResolveResult(to: ScTypeParametersOwner, _: ScSubstitutor)) if to.isInstanceOf[PsiNamedElement] => return tr //all things were done in ScSimpleTypeElementImpl.innerType case Array(ScalaResolveResult(to: PsiTypeParameterListOwner, _: ScSubstitutor)) if to.isInstanceOf[PsiNamedElement] => return tr //all things were done in ScSimpleTypeElementImpl.innerType case _ => } } ref.bind() match { case Some(ScalaResolveResult(_: PsiMethod, _)) => return tr //all things were done in ScSimpleTypeElementImpl.innerType case _ => } case _ => } case _ => } val args: scala.Seq[ScTypeElement] = typeArgList.typeArgs if (args.isEmpty) return tr val argTypesWrapped = args.map {_.getType()} val argTypesgetOrElseped = argTypesWrapped.map {_.getOrAny} def fails(t: ScType) = (for (f@Failure(_, _) <- argTypesWrapped) yield f).foldLeft(this.success(t))(_.apply(_)) //Find cyclic type references argTypesWrapped.find(_.isCyclic) match { case Some(_) => fails(ScParameterizedType(res, Seq(argTypesgetOrElseped.toSeq: _*))) case None => val typeArgs = args.map(_.getType()) val result = ScParameterizedType(res, typeArgs.map(_.getOrAny)) (for (f@Failure(_, _) <- typeArgs) yield 
f).foldLeft(this.success(result))(_.apply(_)) } } override def accept(visitor: ScalaElementVisitor) { visitor.visitParameterizedTypeElement(this) } override def accept(visitor: PsiElementVisitor) { visitor match { case s: ScalaElementVisitor => s.visitParameterizedTypeElement(this) case _ => super.accept(visitor) } } override def processDeclarations(processor: PsiScopeProcessor, state: ResolveState, lastParent: PsiElement, place: PsiElement): Boolean = { if (ScalaPsiUtil.kindProjectorPluginEnabled(this)) { computeDesugarizedType match { case Some(projection: ScTypeProjection) => projection.typeElement match { case paren: ScParenthesisedTypeElement => paren.typeElement match { case Some(compound: ScCompoundTypeElement) => compound.refinement match { case Some(ref) => ref.types match { case Seq(alias: ScTypeAliasDefinition) => for (tp <- alias.typeParameters) { val text = tp.getText val lowerBound = text.indexOf(">:") val upperBound = text.indexOf("<:") //we have to call processor execute so both `+A` and A resolve: Lambda[`+A` => (A, A)] processor.execute(tp, state) processor.execute(new ScSyntheticClass(s"`$text`", Any), state) if (lowerBound < 0 && upperBound > 0) { processor.execute(new ScSyntheticClass(text.substring(0, upperBound), Any), state) } else if (upperBound < 0 && lowerBound > 0) { processor.execute(new ScSyntheticClass(text.substring(0, lowerBound), Any), state) } else if (upperBound > 0 && lowerBound > 0) { val actualText = text.substring(0, math.min(lowerBound, upperBound)) processor.execute(new ScSyntheticClass(actualText, Any), state) } } case _ => } case _ => } case _ => } case _ => } processor.execute(new ScSyntheticClass("+", Any), state) processor.execute(new ScSyntheticClass("-", Any), state) case _ => } } super.processDeclarations(processor, state, lastParent, place) } }
loskutov/intellij-scala
src/org/jetbrains/plugins/scala/lang/psi/impl/base/types/ScParameterizedTypeElementImpl.scala
Scala
apache-2.0
11,042
package com.quantifind.charts import org.scalatest.Matchers import org.scalatest.FunSuite import com.quantifind.charts.highcharts._ import Highchart._ /** * User: austin * Date: 10/4/13 */ class HighchartTest extends FunSuite with Matchers { test("Single point Highchart to json") { val hc = Highchart(Seq(Series(Seq(Data(1, 2)))), chart = Chart(zoomType = Zoom.xy), xAxis = None, yAxis = None).toServiceFormat hc should be ("highcharts", Map( "series" -> List(Map("data" -> List(Map("x" -> 1, "y" -> 2)), "type" -> "line")), "chart" -> Map("zoomType" -> "xy"), "exporting" -> Map("filename" -> "chart"), "plotOptions" -> Map( "line" -> Map("turboThreshold" -> 0) ), "credits" -> Map( "href" -> "", "text" -> "" ), "title" -> Map("text" -> "") ) ) } }
quantifind/wisp
core/src/test/scala/com/qf/charts/HighchartTest.scala
Scala
apache-2.0
881
/* Copyright 2009-2016 EPFL, Lausanne */ package leon.regression.testcases import leon._ import leon.test._ import java.io.File abstract class TestCasesCompile(testDir: String) extends LeonRegressionSuite { val pipeline = frontends.scalac.ExtractionPhase andThen new utils.PreprocessingPhase private def filesIn(path : String): Seq[File] = { val fs = filesInResourceDir(path, _.endsWith(".scala"), recursive=true) fs.toSeq } val baseDir = "regression/testcases/" val allTests = filesIn(baseDir + testDir) allTests.foreach { f => val path = f.getAbsolutePath val index = path.indexOf(baseDir) val name = path.drop(index) test("Compiling "+name) { val ctx = createLeonContext() try { pipeline.run(ctx, List(f.getAbsolutePath)) } catch { case fe: LeonFatalError => fail(ctx, s"Failed to compile $name", fe) } } } } class TestcasesCompile1 extends TestCasesCompile("repair/") class TestcasesCompile2 extends TestCasesCompile("runtime/") class TestcasesCompile3 extends TestCasesCompile("synthesis/") class TestcasesCompile4 extends TestCasesCompile("verification/") class TestcasesCompile5 extends TestCasesCompile("web/")
regb/leon
src/test/scala/leon/regression/testcases/TestCasesCompile.scala
Scala
gpl-3.0
1,226
/* * Copyright (C) 2015 Red Bull Media House GmbH <http://www.redbullmediahouse.com> - all rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.rbmhtechnology.example //#order-definition case class Order(id: String, items: List[String] = Nil, cancelled: Boolean = false) { def addItem(item: String): Order = copy(items = item :: items) def removeItem(item: String): Order = copy(items = items.filterNot(_ == item)) def cancel: Order = copy(cancelled = true) override def toString() = s"[${id}] items=${items.reverse.mkString(",")} cancelled=${cancelled}" } //#
linearregression/eventuate
src/test/scala/com/rbmhtechnology/example/Order.scala
Scala
apache-2.0
1,125
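A hypothetical usage sketch (not part of the eventuate example) exercising the immutable Order operations defined above; every call returns a new copy.

import com.rbmhtechnology.example.Order

object OrderExample {
  def main(args: Array[String]): Unit = {
    val order = Order("o-1").addItem("apple").addItem("pear").removeItem("apple")
    assert(order.items == List("pear"))   // items are prepended, then filtered
    assert(order.cancel.cancelled)        // cancel returns a cancelled copy
    assert(!order.cancelled)              // ...leaving the original untouched
    println(order)                        // [o-1] items=pear cancelled=false
  }
}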
import java.awt.Desktop
import java.awt.image.BufferedImage
import java.io.{FileOutputStream, File}
import java.text.DecimalFormat
import javafx.application.{Platform, Application}
import javafx.concurrent.Task
import javafx.event.{ActionEvent, EventHandler}
import javafx.geometry.{Insets, Pos}
import javafx.scene.Scene
import javafx.scene.control.Alert.AlertType
import javafx.scene.control._
import javafx.scene.layout._
import javafx.stage.{DirectoryChooser, FileChooser, Stage}

import org.apache.pdfbox.pdmodel.{PDPage, PDDocument}
import org.apache.pdfbox.util.ImageIOUtil

import scala.collection.JavaConversions._
import scala.util.Try

class Main extends Application {

  override def start(primaryStage: Stage): Unit = {
    primaryStage.setTitle("PDF to PNG")

    var loadedDoc: Option[(File, PDDocument)] = None
    var saveDirectory: Option[File] = None

    //choose pdf elements
    val choosePDFLabel = new Label("PDF:")
    val chooseButton = new Button("Browse for PDF")
    val pdfTextField = new TextField("choose a pdf")
    pdfTextField.setPrefWidth(300)
    pdfTextField.setEditable(false)

    //choose directory elements
    val chooseDirLabel = new Label("Save Directory:")
    val directoryButton = new Button("Save To")
    val directoryTextField = new TextField("choose a save directory")
    directoryTextField.setPrefWidth(300.0)
    directoryTextField.setEditable(false)

    //conversion elements
    val progress = new ProgressBar()
    val convert = new Button("Convert")
    val progressLabel = new Label("0/0")
    progress.setPrefWidth(400)
    progress.setVisible(false)
    progressLabel.setVisible(false)

    //output information label
    val outputLabel = new Label("")
    outputLabel.setVisible(false)
    outputLabel.setPadding(new Insets(0, 0, 0, 3))

    //bind the choose pdf button
    chooseButton.setOnAction(new EventHandler[ActionEvent] {
      override def handle(event: ActionEvent): Unit = {
        //initialize the file chooser
        val fc = new FileChooser()
        fc.setTitle("Choose PDF")

        //open the file chooser
        val file = fc.showOpenDialog(primaryStage)

        //attempt to load the PDF
        val tryDoc = Try(PDDocument.load(file)).toOption

        //if the PDF is valid, set the label, otherwise give user an alert
        if(tryDoc.isDefined) {
          loadedDoc = Some((file, tryDoc.get))
          pdfTextField.setText(file.getAbsolutePath)

          val parentDirectory = new File(file.getParent)
          if(parentDirectory.isDirectory){
            saveDirectory = Some(parentDirectory)
            directoryTextField.setText(parentDirectory.getAbsolutePath)
          }
        } else {
          val alert = new Alert(AlertType.ERROR, "Not a Valid PDF")
          alert.show()
        }
      }
    })

    //bind the choose directory element
    directoryButton.setOnAction(new EventHandler[ActionEvent] {
      override def handle(event: ActionEvent): Unit = {
        val dc = new DirectoryChooser()
        val dir = dc.showDialog(primaryStage)
        if(dir != null){
          saveDirectory = Some(dir)
          directoryTextField.setText(dir.getAbsolutePath)
        }
      }
    })

    //bind the convert button
    convert.setOnAction(new EventHandler[ActionEvent] {
      override def handle(event: ActionEvent): Unit = {
        if(loadedDoc.isDefined && saveDirectory.isDefined){
          outputLabel.setVisible(false)
          progress.setVisible(true)
          progressLabel.setVisible(true)
          progress.setProgress(0)
          progressLabel.setText("0/0")

          //task to generate the JPEGs
          val task = new Task[Unit] {
            override def call(): Unit = {
              val dir = saveDirectory.get.getAbsolutePath
              val pdf = loadedDoc.get._2
              val pages = pdf.getDocumentCatalog.getAllPages.toList
              val df = new DecimalFormat("0000")
              val prefix = loadedDoc.get._1.getName.stripSuffix(".pdf")

              pages.zipWithIndex.foreach({ case (page, index) =>
                val image = page.asInstanceOf[PDPage].convertToImage(BufferedImage.TYPE_INT_RGB, 400)
                val filename = df.format(index) + ".jpg"
                val file = new File(dir + "/" + prefix + "-" + filename)
                val os = new FileOutputStream(file)
                ImageIOUtil.writeImage(image, "jpg", os, 800)
                os.flush()
                os.close()

                updateProgress(index + 1, pages.size)
                Platform.runLater(new Runnable {
                  override def run(): Unit = {
                    progressLabel.setText(s"${index + 1}/${pages.size}")
                    progress.setProgress(getProgress)
                  }
                })
              })

              Platform.runLater(new Runnable {
                override def run(): Unit = {
                  outputLabel.setText(s"${pages.size} files were written to ${saveDirectory.get.getAbsolutePath}")
                  outputLabel.setVisible(true)
                }
              })

              Desktop.getDesktop.open(saveDirectory.get)
            }
          }

          val thread = new Thread(task, "pdf-to-jpg")
          thread.setDaemon(true)
          thread.start()
        } else {
          val alert = new Alert(AlertType.ERROR, "You must specify a PDF and Directory")
          alert.show()
        }
      }
    })

    //setup the layout
    val root = new GridPane()
    root.setHgap(3.0)

    val choosePDFLabelTextField = new HBox()
    choosePDFLabelTextField.getChildren.addAll(choosePDFLabel, pdfTextField)
    choosePDFLabelTextField.setSpacing(3.0)
    choosePDFLabelTextField.setAlignment(Pos.CENTER_RIGHT)
    root.add(choosePDFLabelTextField, 0, 0)
    root.add(chooseButton, 1, 0)

    val chooseDirLabelTextField = new HBox()
    chooseDirLabelTextField.setSpacing(3.0)
    chooseDirLabelTextField.setAlignment(Pos.CENTER_RIGHT)
    chooseDirLabelTextField.getChildren.addAll(chooseDirLabel, directoryTextField)
    root.add(chooseDirLabelTextField, 0, 1)
    root.add(directoryButton, 1, 1)

    root.add(progress, 0, 2)

    val convertBtnLabel = new HBox() //we want to put the button and label in the same box
    convertBtnLabel.setSpacing(3.0)
    convertBtnLabel.setAlignment(Pos.BOTTOM_LEFT)
    convertBtnLabel.getChildren.addAll(convert, progressLabel)
    root.add(convertBtnLabel, 1, 2)

    root.add(outputLabel, 0, 3, 2, 2)

    primaryStage.setScene(new Scene(root))
    primaryStage.show()
  }
}

object Main {
  def main(args: Array[String]) {
    Application.launch(classOf[Main], args: _*)
  }
}
kfang/PDFtoJPEG
src/main/scala/Main.scala
Scala
apache-2.0
6,711
package com.lucidchart.relate.macros

import com.lucidchart.relate._
import macrocompat.bundle
import scala.reflect.macros.blackbox.Context

@bundle
class RowParserImpl(val c: Context) {
  import c.universe._

  def generateSnakeImpl[A: c.WeakTypeTag]: Tree = generate[A](AnnotOpts(true, Map.empty))
  def generateImpl[A: c.WeakTypeTag]: Tree = generate[A](AnnotOpts(false, Map.empty))

  def generateMappingImpl[A: c.WeakTypeTag](colMapping: c.Expr[Map[String, String]]): Tree = {
    val q"$s(..$params)" = colMapping.tree
    generate[A](AnnotOpts(false, getRemapping(params)))
  }

  case class AnnotOpts(snakeCase: Boolean, remapping: Map[String, Tree])

  def annotation(annottees: c.Expr[Any]*): Tree = {
    val validOptions = Set("colMapping", "snakeCase")

    val opts: AnnotOpts = c.prefix.tree match {
      case q"new Record(..$params)" =>
        val paramTrees: Map[String, Tree] = params.map {
          case q"$optNameAst -> $optValueAst" =>
            val optName = optNameAst match {
              case Literal(Constant(optName: String)) => optName
              case name => c.abort(name.pos, "Keys must be literal strings")
            }

            if (!validOptions.contains(optName)) {
              c.abort(optNameAst.pos, s"$optName is an invalid option. Valid options: ${validOptions.mkString(", ")}")
            }

            optName -> optValueAst
        }.toMap

        if (paramTrees.contains("colMapping") && paramTrees.contains("snakeCase")) {
          c.abort(c.enclosingPosition, "Only one of snakeCase or colMapping can be supplied")
        }

        paramTrees.foldLeft(AnnotOpts(false, Map.empty)) { case (opts, (optName, optValueAst)) =>
          optName match {
            case "colMapping" => optValueAst match {
              case q"Map[..$tpts](..$params)" => opts.copy(remapping = getRemapping(params))
            }
            case "snakeCase" => optValueAst match {
              case q"true" => opts.copy(snakeCase = true)
              case q"false" => opts.copy(snakeCase = false)
              case value => c.abort(value.pos, "snakeCase requires a literal true or false value")
            }
          }
        }
      case q"new Record()" => AnnotOpts(false, Map.empty)
    }

    val inputs = annottees.map(_.tree).toList

    val result: List[Tree] = inputs match {
      case target @ q"case class $tpname[..$tparams] $ctorMods(...$paramss) extends { ..$earlydefns } with ..$parents { $self => ..$stats }" :: tail =>
        val params = paramss.head
        val paramNames = params.map(_.name.toString).toSet

        opts.remapping.foreach { case (givenCol, tree) =>
          if (!paramNames.contains(givenCol)) {
            c.abort(tree.pos, s"$givenCol is not a member of $tpname")
          }
        }

        val extractors = generateExtractors(params, opts)

        val existingCompanion = if (tail.isEmpty) {
          q"object ${tpname.toTermName} { }"
        } else {
          tail.head
        }

        val companion: Tree = existingCompanion match {
          case q"$mods object $tname extends { ..$earlydefns } with ..$parents { $self: $stype => ..$body }" =>
            val typeName = tq"$tpname"
            q"""$mods object $tname extends { ..$earlydefns } with ..$parents { $self: $stype =>
              ..$body
              implicit val relateRowParser: com.lucidchart.relate.RowParser[$typeName] = {
                ${newRowParser(typeName, extractors, q"$tname")}
              }
            }"""
        }

        List(target.head, companion)
      case _ => c.abort(c.enclosingPosition, "@Record must be used on a case class")
    }

    Block(result, Literal(Constant(())))
  }

  private def generate[A: c.WeakTypeTag](opts: AnnotOpts): Tree = {
    val tpe = weakTypeTag[A].tpe
    val theApply = findApply(tpe)

    val params = theApply match {
      case Some(symbol) => symbol.paramLists.head
      case None => c.abort(c.enclosingPosition, "No apply function found")
    }

    val paramNames = params.map(_.name.toString).toSet
    opts.remapping.foreach { case (givenCol, tree) =>
      if (!paramNames.contains(givenCol)) {
        c.abort(tree.pos, s"$givenCol is not a member of $tpe")
      }
    }

    val input = generateCalls(params.map(CallData.fromSymbol(_, opts)))
    val comp = q"${tpe.typeSymbol.companion}"
    val typeName = tq"${weakTypeTag[A].tpe}"
    newRowParser(typeName, input, comp)
  }

  private def newRowParser(tpe: Tree, extractors: List[Tree], comp: Tree): Tree = {
    q"""
      new com.lucidchart.relate.RowParser[$tpe] {
        def parse(row: com.lucidchart.relate.SqlRow): $tpe = {
          $comp(..$extractors)
        }
      }
    """
  }

  private def tupleValueString(tupleTree: Tree): String = {
    val remapAst = tupleTree match {
      case q"$aa($colLit).$arrow[..$tpts]($remapAst)" => remapAst
      case q"$col -> $remapAst" => remapAst
      case q"($col, $remapAst)" => remapAst
    }

    remapAst match {
      case Literal(Constant(remap: String)) => remap
      case value => c.abort(value.pos, "Remappings must be literal strings")
    }
  }

  case class CallData(name: Literal, tpt: Type, args: List[Type], isOption: Boolean)
  object CallData {
    def fromSymbol(sym: Symbol, opts: AnnotOpts): CallData = {
      val value = if (opts.snakeCase) {
        toSnakeCase(sym.name.toString)
      } else if (opts.remapping.contains(sym.name.toString)) {
        tupleValueString(opts.remapping(sym.name.toString))
      } else {
        sym.name.toString
      }

      val TypeRef(_, outerType, args) = sym.info
      val TypeRef(_, option, _) = typeOf[Option[Any]]

      CallData(Literal(Constant(value)), sym.info, args, outerType == option)
    }
  }

  private def generateCalls(callData: List[CallData]): List[Tree] = {
    callData.map { cd =>
      if (cd.isOption) {
        q"row.opt[${cd.args.head}](${cd.name})"
      } else {
        q"row[${cd.tpt}](${cd.name})"
      }
    }
  }

  private def generateExtractors(params: List[ValDef], opts: AnnotOpts): List[Tree] = {
    params.map { param =>
      val p = {
        val value = if (opts.snakeCase) {
          toSnakeCase(param.name.toString)
        } else if (opts.remapping.contains(param.name.toString)) {
          tupleValueString(opts.remapping(param.name.toString))
        } else {
          param.name.toString
        }
        Literal(Constant(value))
      }

      param.tpt match {
        case AppliedTypeTree(outer, ps) if outer.toString == "Option" =>
          q"row.opt[${ps.head}]($p)"
        case tpt =>
          q"row[$tpt]($p)"
      }
    }
  }

  private def toSnakeCase(s: String): String =
    s.replaceAll(
      "([A-Z]+)([A-Z][a-z])", "$1_$2"
    ).replaceAll(
      "([a-z\\d])([A-Z])", "$1_$2"
    ).toLowerCase

  private def findApply(target: Type): Option[MethodSymbol] = {
    val companion: Type = target.companion

    val unapplyReturnTypes = getUnapplyReturnTypes(companion)
    val applies = getApplies(companion)
    findApplyUnapplyMatch(companion, applies, unapplyReturnTypes)
  }

  private def getReturnTypes(args: List[Type]): Option[List[Type]] = {
    args.head match {
      case t @ TypeRef(_, _, Nil) => Some(List(t))
      case t @ TypeRef(_, _, args) =>
        if (t <:< typeOf[Product]) Some(args) else Some(List(t))
      case _ => None
    }
  }

  private def getUnapplyReturnTypes(companion: Type): Option[List[Type]] = {
    val unapply = companion.decl(TermName("unapply"))
    val unapplySeq = companion.decl(TermName("unapplySeq"))
    val hasVarArgs = unapplySeq != NoSymbol

    val effectiveUnapply = Seq(unapply, unapplySeq).find(_ != NoSymbol) match {
      case None => c.abort(c.enclosingPosition, "No unapply or unapplySeq function found")
      case Some(s) => s.asMethod
    }

    effectiveUnapply.returnType match {
      case TypeRef(_, _, Nil) =>
        c.abort(c.enclosingPosition, s"Unapply of $companion has no parameters. Are you using an empty case class?")
        None
      case TypeRef(_, _, args) =>
        args.head match {
          case t @ TypeRef(_, _, Nil) => Some(List(t))
          case t @ TypeRef(_, _, args) =>
            import c.universe.definitions.TupleClass
            if (!TupleClass.seq.exists(tupleSym => t.baseType(tupleSym) ne NoType)) Some(List(t))
            else if (t <:< typeOf[Product]) Some(args)
            else None
          case _ => None
        }
      case _ => None
    }
  }

  private def getApplies(companion: Type): List[Symbol] = {
    companion.decl(TermName("apply")) match {
      case NoSymbol => c.abort(c.enclosingPosition, "No apply function found")
      case s => s.asTerm.alternatives
    }
  }

  private def findApplyUnapplyMatch(
    companion: Type,
    applies: List[Symbol],
    unapplyReturnTypes: Option[List[Type]]
  ): Option[MethodSymbol] = {
    val unapply = companion.decl(TermName("unapply"))
    val unapplySeq = companion.decl(TermName("unapplySeq"))
    val hasVarArgs = unapplySeq != NoSymbol

    applies.collectFirst {
      case (apply: MethodSymbol) if hasVarArgs && {
        val someApplyTypes = apply.paramLists.headOption.map(_.map(_.asTerm.typeSignature))
        val someInitApply = someApplyTypes.map(_.init)
        val someApplyLast = someApplyTypes.map(_.last)
        val someInitUnapply = unapplyReturnTypes.map(_.init)
        val someUnapplyLast = unapplyReturnTypes.map(_.last)
        val initsMatch = someInitApply == someInitUnapply
        val lastMatch = (for {
          lastApply <- someApplyLast
          lastUnapply <- someUnapplyLast
        } yield lastApply <:< lastUnapply).getOrElse(false)
        initsMatch && lastMatch
      } => apply
      case (apply: MethodSymbol) if apply.paramLists.headOption.map(_.map(_.asTerm.typeSignature)) == unapplyReturnTypes => apply
    }
  }

  private def expand(colLit: Tree, tree: Tree): (String, Tree) = {
    val col = colLit match {
      case Literal(Constant(col: String)) => col
      case _ => c.abort(colLit.pos, "Column names must be literal strings")
    }
    col -> tree
  }

  private def getRemapping(params: List[Tree]): Map[String, Tree] = {
    params.map {
      case tree @ q"$aa($colLit).$arrow[..$tpts]($remapLit)" => expand(colLit, tree)
      case tree @ q"$colLit -> $remapLit" => expand(colLit, tree)
      case tree @ q"($colLit, $remapLit)" => expand(colLit, tree)
      case tree => c.abort(tree.pos, "Remappings must be literal tuples")
    }.toMap
  }
}
pauldraper/relate
macros/src/main/scala/com/lucidchart/open/relate/macros/RowParserImpl.scala
Scala
apache-2.0
10,517
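A minimal usage sketch for the @Record macro annotation implemented in the record above; the User case class, its column names, and the query mentioned in the comments are illustrative assumptions, not part of the repository record itself.

// Hypothetical usage of the @Record annotation above (names are illustrative).
import com.lucidchart.relate._

@Record("snakeCase" -> true)
case class User(id: Long, firstName: String, nickname: Option[String])

// The annotation rewrites User's companion to carry
//   implicit val relateRowParser: RowParser[User]
// which reads the snake_cased columns "id", "first_name" and "nickname",
// so a Relate query result can be mapped to User without a hand-written parser.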
package debop4s.data

import java.util.concurrent.{LinkedBlockingQueue, ThreadPoolExecutor, TimeUnit}

import debop4s.core._
import debop4s.core.concurrent._
import debop4s.data.slick3.SlickContext._
import debop4s.data.slick3.SlickContext.driver.api._
import org.reactivestreams.{Publisher, Subscriber, Subscription}
import org.slf4j.LoggerFactory

import scala.collection.generic.CanBuildFrom
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.language.postfixOps
import scala.util.control.NonFatal
import scala.util.{Failure, Success}

/**
 * package
 * @author [email protected]
 */
package object slick3 {

  private[this] lazy val log = LoggerFactory.getLogger(getClass)

  implicit class DatabaseExtensions(db: SlickContext.driver.backend.DatabaseDef) {

    /** Runs the action synchronously. */
    def exec[@miniboxed R](action: DBIOAction[R, NoStream, Nothing]): R = {
      action.exec(db)
      // db.run(action).await
    }

    def result[E, U, Seq[_]](query: Query[E, U, Seq]): Seq[_] = {
      query.exec(db)
    }

    def result[@miniboxed T](query: Rep[T]): T = {
      query.exec(db)
      // db.run(query.result).await
    }

    /**
     * Runs the actions in sequence.
     * {{{
     *   Seq(action1, action2, action3).seq
     *   // or
     *   action1 >> action2 >> action3
     *   // or
     *   action1 andThen action2 andThen action3
     * }}}
     */
    def seq[E <: Effect](actions: DBIOAction[_, NoStream, E]*) = {
      DBIO.seq(actions: _*).exec(db)
    }

    def withPinnedSession[E <: Effect](actions: DBIOAction[_, NoStream, E]*) = {
      DBIO.seq(actions: _*).withPinnedSession.exec(db)
    }

    def withTransaction[E <: Effect](actions: DBIOAction[_, NoStream, E]*) = {
      DBIO.seq(actions: _*).transactionally.exec(db)
    }

    //  def withTransaction[@miniboxed R](block: Session => R) = {
    //    using(db.createSession()) { s =>
    //      s.withTransaction { block(s) }
    //    }
    //  }

    /**
     * Converts a `Seq[ DBIO[R] ]` into a `DBIO[ Seq[R] ]`, runs it against the db, and returns the `Seq[R]`.
     * {{{
     *   Seq(
     *     schema.create,
     *     q1.result,
     *     q2.result
     *   ).sequence
     *
     *   // returns Seq(Unit, q1.result, q2.result)
     * }}}
     */
    def sequence[@miniboxed R, E <: Effect](in: DBIOAction[R, NoStream, E]*)
                                           (implicit cbf: CanBuildFrom[Seq[DBIOAction[R, NoStream, E]], R, Seq[R]]): Seq[R] = {
      DBIO.sequence(in).exec(db)
      // db.run(DBIO.sequence(in)).await
    }

    /**
     * Runs the actions in parallel.
     * {{{
     *   Seq(action1, action2, action3).execPar
     * }}}
     */
    // TODO: be sure to provide this function in SlickComponent as well, especially as autoCommitParallel or asParallel
    def execPar[E <: Effect](actions: DBIOAction[_, NoStream, E]*) = {
      val results: Seq[Future[Any]] = actions.par.map { action => db.run(action) }.seq
      results.awaitAll
    }
  }

  implicit class DBIOActionExtensions[R](action: DBIOAction[R, NoStream, _]) {
    /**
     * {{{
     *   val query = users.map(u => (u.id, u.name))
     *   val userList = query.exec
     * }}}
     * @return
     */
    def exec(implicit db: SlickContext.driver.backend.DatabaseDef = defaultDB): R = {
      db.run(action).await
    }
  }

  implicit class DBIOActionSeqExtensions[R](actions: Seq[DBIOAction[R, NoStream, _]]) {
    /**
     * {{{
     *   val query = users.map(u => (u.id, u.name))
     *   val userList = query.exec
     * }}}
     * @return
     */
    def exec(implicit db: SlickContext.driver.backend.DatabaseDef = defaultDB) = {
      db.run(DBIO.seq(actions: _*)).stay
    }
  }

  implicit class DBIOStreamActionExtensions[T](action: DBIOAction[_, Streaming[T], _]) {
    def stream(implicit db: SlickContext.driver.backend.DatabaseDef = defaultDB) = {
      db.stream(action)
    }
  }

  implicit class RepExtensions[T](r: Rep[T]) {
    /**
     * {{{
     *   val count = users.length.exec
     * }}}
     * @return
     */
    def exec(implicit db: SlickContext.driver.backend.DatabaseDef = defaultDB): T = {
      db.run(r.result).await
    }
  }
}
debop/debop4s
debop4s-data-slick3/src/main/scala/debop4s/data/slick3/package.scala
Scala
apache-2.0
4,255
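A short sketch of how the implicit extensions in the package object above are meant to be used, assuming a configured defaultDB; the Users table definition is a hypothetical example and not part of the package itself.

// Illustrative use of DBIOActionExtensions and RepExtensions from the package above;
// the Users table and the configured defaultDB are assumptions.
import debop4s.data.slick3._
import debop4s.data.slick3.SlickContext.driver.api._

object UsersExample {
  class Users(tag: Tag) extends Table[(Long, String)](tag, "users") {
    def id   = column[Long]("id", O.PrimaryKey)
    def name = column[String]("name")
    def *    = (id, name)
  }
  val users = TableQuery[Users]

  // Synchronous execution against defaultDB via the implicit extensions:
  val names: Seq[String] = users.map(_.name).result.exec // DBIOActionExtensions
  val count: Int         = users.length.exec             // RepExtensions
}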
package com.azavea.opentransit

import com.azavea.opentransit.service.OpenTransitServiceActor
import com.azavea.opentransit.database._

import akka.actor.{ActorSystem, Props}
import akka.io.IO
import com.typesafe.config.{ConfigFactory, Config}
import spray.can.Http

import scala.slick.jdbc.JdbcBackend._
import scala.slick.jdbc.{StaticQuery => Q}

object Main {
  val actorSystem = ActorSystem("opentransit")
  val rasterCache = RasterCache(actorSystem)

  // This call sets all processing jobs to a failed state
  def failLeftoverJobs(): Unit = {
    val jobsTable = new IndicatorJobsTable {}
    val dbi = new ProductionDatabaseInstance {}
    dbi.db withSession { implicit session: Session =>
      jobsTable.failOOMError
    }
  }

  def main(args: Array[String]) {
    // Set any incomplete jobs to errored out
    failLeftoverJobs

    // We need an ActorSystem to host our service
    implicit val system = actorSystem

    // Create our service actor
    val service = system.actorOf(Props[OpenTransitServiceActor], "opentransit-service")

    // Bind our actor to HTTP
    IO(Http) ! Http.Bind(service, interface = "0.0.0.0",
      port = ConfigFactory.load.getInt("opentransit.spray.port"))
  }
}
flibbertigibbet/open-transit-indicators
scala/opentransit/src/main/scala/com/azavea/opentransit/Main.scala
Scala
gpl-3.0
1,203
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package ai.h2o.sparkling.ml.algos

import ai.h2o.sparkling.{SharedH2OTestContext, TestUtils}
import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.sql.SparkSession
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{FunSuite, Matchers}

@RunWith(classOf[JUnitRunner])
class H2OIsolationForestTestSuite extends FunSuite with Matchers with SharedH2OTestContext {

  override def createSparkSession(): SparkSession = sparkSession("local[*]")

  private lazy val trainingDataset = spark.read
    .option("inferSchema", "true")
    .csv(TestUtils.locate("smalldata/anomaly/ecg_discord_train.csv"))

  private lazy val testingDataset = spark.read
    .option("inferSchema", "true")
    .csv(TestUtils.locate("smalldata/anomaly/ecg_discord_test.csv"))

  test("H2OIsolationForest Pipeline serialization and deserialization") {
    val algo = new H2OIsolationForest()

    val pipeline = new Pipeline().setStages(Array(algo))
    pipeline.write.overwrite().save("ml/build/isolation_forest_pipeline")
    val loadedPipeline = Pipeline.load("ml/build/isolation_forest_pipeline")
    val model = loadedPipeline.fit(trainingDataset)
    val expected = model.transform(testingDataset)

    model.write.overwrite().save("ml/build/isolation_forest_pipeline_model")
    val loadedModel = PipelineModel.load("ml/build/isolation_forest_pipeline_model")
    val result = loadedModel.transform(testingDataset)

    TestUtils.assertDataFramesAreIdentical(expected, result)
  }
}
h2oai/sparkling-water
ml/src/test/scala/ai/h2o/sparkling/ml/algos/H2OIsolationForestTestSuite.scala
Scala
apache-2.0
2,324
package com.rouesnel.typedsql.hive

import java.util.Date

import com.rouesnel.typedsql.core._
import com.rouesnel.typedsql.udf.{PlaceholderUDF, UdfDescription}
import com.rouesnel.typedsql.util

import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.metastore.api._
import org.apache.hadoop.hive.ql.Driver
import org.apache.hadoop.hive.ql.exec.FunctionRegistry
import org.apache.hadoop.hive.ql.parse._
import org.apache.hadoop.hive.ql.session.SessionState

import scala.language.experimental.macros
import scala.util.control.NonFatal

import scalaz.Scalaz._
import scalaz._

/** Provides helpers for parsing/manipulating Hive queries */
object HiveQuery {
  import scala.collection.convert.decorateAsJava._

  /**
   * Parses a SELECT query using the Hive Parser.
   */
  def parseSelect(query: String): String \/ ASTNode = {
    try {
      val pd = new ParseDriver()
      \/.right(pd.parseSelect(query, null))
    } catch {
      case ex: Exception => \/.left(s"Error parsing the sql query: ${ex.getMessage}")
    }
  }

  /**
   * Compiles a Hive Query and returns the Hive Schema
   *
   * @param hiveConf conf corresponding to a local instance (see HiveSupport)
   * @param sources the other tables/schemas that should be available
   * @param parameterVariables map of parameter names to default values to use for compilation
   * @param query query to compile
   * @return error or the compiled Hive Schema
   */
  def compileQuery(hiveConf: HiveConf,
                   sources: Map[String, (StructType, List[(String, HiveType)])],
                   parameterVariables: Map[String, String],
                   udfs: List[UdfDescription],
                   query: String): Throwable \/ Schema = HiveSupport.useHiveClassloader {
    val driver = new Driver(hiveConf)
    try {
      compileQuery(driver, hiveConf, sources, parameterVariables, udfs, query)
    } finally {
      driver.close()
      driver.destroy()
    }
  }

  /**
   * Compiles a Hive Query and returns the Hive Schema using the provided driver.
   *
   * This version is available to support long-running operations where the driver is externally
   * managed.
   *
   * @param driver the Hive driver to use for compilation
   * @param hiveConf conf corresponding to a local instance (see HiveSupport)
   * @param sources the other tables/schemas that should be available
   * @param parameterVariables map of parameter names to default values to use for compilation
   * @param query query to compile
   * @return error or the compiled Hive Schema
   */
  def compileQuery(driver: Driver,
                   hiveConf: HiveConf,
                   sources: Map[String, (StructType, List[(String, HiveType)])],
                   parameterVariables: Map[String, String],
                   udfs: List[UdfDescription],
                   query: String): Throwable \/ Schema = HiveSupport.useHiveClassloader {
    SessionState.start(hiveConf)
    SessionState.get().setIsSilent(true)

    val dbName = s"test_${new Date().getTime}"

    // Create the compilation environment
    createCompilationEnvironment(dbName, hiveConf, sources)

    // Initialise the variable substitution
    val sourceVariables = sources.keys.map(tableName => tableName -> s"${dbName}.${tableName}").toMap
    val variables = (sourceVariables ++ parameterVariables).asJava

    // Need to synchronize here because the functions and placeholder UDFs are global.
    PlaceholderUDF.synchronized {
      \/.fromTryCatchNonFatal {
        SessionState.get().setHiveVariables(variables)
        PlaceholderUDF
          .configurePlaceholders(udfs)((udf, udfClazz) =>
            FunctionRegistry.registerTemporaryFunction(udf.name, udfClazz))
          .fold(err => throw new Exception(err), identity)

        // Run the query.
        driver.init()
        driver.compile(query)
        val result = driver.getSchema()
        udfs.foreach(udf => FunctionRegistry.unregisterTemporaryUDF(udf.name))
        result
      }
    }
  }

  /**
   * Creates tables in the specified database with the corresponding schema for each source table.
   *
   * Taken/inspired by the corresponding functions in [Beeswax](https://github.com/CommBank/beeswax).
   * See https://github.com/CommBank/beeswax/blob/master/src/main/scala/au/com/cba/omnia/beeswax/HiveMetadataTable.scala
   */
  def createCompilationEnvironment(
    dbName: String,
    hiveConf: HiveConf,
    sources: Map[String, (StructType, List[(String, HiveType)])]) = {
    import au.com.cba.omnia.beeswax._

    Hive
      .createDatabase(dbName)
      .flatMap(_ => {
        Hive.getConfClient.map({ case (conf, client) =>
          sources.toList.map({
            case (tableName, (tableSchema, partitionCols)) => {
              val table = new Table()
              table.setDbName(dbName)
              table.setTableName(tableName)

              val sd = new StorageDescriptor()
              tableSchema.fields.toList.foreach({
                case (fieldName, fieldType) => {
                  val fieldSchema = new FieldSchema(fieldName, fieldType.hiveType, "Table in compilation environment")
                  sd.addToCols(fieldSchema)
                }
              })

              val partitionFieldSchemas = partitionCols.map({
                case (fieldName, fieldType) => {
                  new FieldSchema(fieldName, fieldType.hiveType, "Table in compilation environment")
                }
              })

              if (partitionFieldSchemas.nonEmpty) {
                table.setPartitionKeys(partitionFieldSchemas.asJava)
                table.setPartitionKeysIsSet(true)
              }

              table.setSd(sd)
              ParquetFormat.applyFormat(table)

              try {
                client.createTable(table)
                Hive.value(true)
              } catch {
                case NonFatal(t) =>
                  Hive.error(
                    s"Failed to create table $tableName in compilation environment (db = ${dbName})", t)
              }
            }
          })
        })
      })
      .run(hiveConf)
  }
}
laurencer/typedsql
macro/src/main/scala/com/rouesnel/typedsql/hive/HiveQuery.scala
Scala
apache-2.0
6,502
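A hedged sketch of calling HiveQuery.compileQuery from the record above; the HiveConf construction, the empty source map, and the sample query are assumptions (the project itself obtains its conf via HiveSupport), not a verified end-to-end example.

// Hypothetical invocation of HiveQuery.compileQuery; the HiveConf and query
// are assumptions, and no source tables or UDFs are registered here.
import com.rouesnel.typedsql.hive.HiveQuery
import org.apache.hadoop.hive.conf.HiveConf

object CompileQueryExample {
  def main(args: Array[String]): Unit = {
    val hiveConf = new HiveConf() // the real project builds this via HiveSupport
    val result = HiveQuery.compileQuery(
      hiveConf,
      sources            = Map.empty, // tables visible to the query
      parameterVariables = Map.empty,
      udfs               = Nil,
      query              = "SELECT 1 + 1"
    )
    result.fold(
      err    => println(s"compilation failed: ${err.getMessage}"),
      schema => println(schema.getFieldSchemas)
    )
  }
}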