prompt: large_string (lengths 70 to 991k)
completion: large_string (lengths 0 to 1.02k)
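Each row below pairs a fill-in-the-middle (FIM) prompt with its ground-truth completion: a source file is split around a hole, and the prompt wraps the surrounding prefix and suffix in sentinel tokens. A minimal sketch of how such a pair is assembled, assuming the DeepSeek-style token layout visible in the rows (make_fim_pair is a hypothetical helper, not part of any library):

def make_fim_pair(source, hole_start, hole_end):
    """Split a source string into a FIM prompt and its ground-truth completion."""
    prefix = source[:hole_start]
    completion = source[hole_start:hole_end]  # the span the model must fill in
    suffix = source[hole_end:]
    prompt = "<|fim▁begin|>" + prefix + "<|fim▁hole|>" + suffix + "<|fim▁end|>"
    return prompt, completion

# The inverse recovers the original file: prefix + completion + suffix.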
<|file_name|>composition_query.py<|end_file_name|>
from citrination_client.search.pif.query.chemical.chemical_field_operation import ChemicalFieldOperation
from citrination_client.search.pif.query.core.base_object_query import BaseObjectQuery
from citrination_client.search.pif.query.core.field_operation import FieldOperation


class CompositionQuery(BaseObjectQuery):
    """
    Class to query against a PIF Composition object.
    """

    def __init__(self, element=None, actual_weight_percent=None, actual_atomic_percent=None,
                 ideal_weight_percent=None, ideal_atomic_percent=None, logic=None,
                 tags=None, length=None, offset=None):
        """
        Constructor.

        :param element: One or more :class:`ChemicalFieldOperation` operations
               against the element field.
        :param actual_weight_percent: One or more :class:`FieldOperation` operations
               against the actual weight percent field.
        :param actual_atomic_percent: One or more :class:`FieldOperation` operations
               against the actual atomic percent field.
        :param ideal_weight_percent: One or more :class:`FieldOperation` operations
               against the ideal weight percent field.
        :param ideal_atomic_percent: One or more :class:`FieldOperation` operations
               against the ideal atomic percent field.
        :param logic: Logic for this filter. Must be equal to one of "MUST",
               "MUST_NOT", "SHOULD", or "OPTIONAL".
        :param tags: One or more :class:`FieldOperation` operations against the tags field.
        :param length: One or more :class:`FieldOperation` operations against the length field.
        :param offset: One or more :class:`FieldOperation` operations against the offset field.
        """
        super(CompositionQuery, self).__init__(logic=logic, tags=tags, length=length, offset=offset)
        self._element = None
        self.element = element
        self._actual_weight_percent = None
        self.actual_weight_percent = actual_weight_percent
        self._actual_atomic_percent = None
        self.actual_atomic_percent = actual_atomic_percent
        self._ideal_weight_percent = None
        self.ideal_weight_percent = ideal_weight_percent
        self._ideal_atomic_percent = None
        self.ideal_atomic_percent = ideal_atomic_percent

    @property
    def element(self):
        return self._element

    @element.setter
    def element(self, element):
        self._element = self._get_object(ChemicalFieldOperation, element)

    @element.deleter
    def element(self):
        self._element = None

    @property
    def actual_weight_percent(self):
        return self._actual_weight_percent

    @actual_weight_percent.setter
    def actual_weight_percent(self, actual_weight_percent):
        self._actual_weight_percent = self._get_object(FieldOperation, actual_weight_percent)

    @actual_weight_percent.deleter
    def actual_weight_percent(self):
        self._actual_weight_percent = None

    @property
    def actual_atomic_percent(self):
        return self._actual_atomic_percent

    @actual_atomic_percent.setter
    def actual_atomic_percent(self, actual_atomic_percent):
        self._actual_atomic_percent = self._get_object(FieldOperation, actual_atomic_percent)

    @actual_atomic_percent.deleter
    def actual_atomic_percent(self):
        self._actual_atomic_percent = None

    @property
    def ideal_weight_percent(self):
        return self._ideal_weight_percent

    @ideal_weight_percent.setter
    def ideal_weight_percent(self, ideal_weight_percent):
        self._ideal_weight_percent = self._get_object(FieldOperation, ideal_weight_percent)

    @ideal_weight_percent.deleter
    def ideal_weight_percent(self):
        self._ideal_weight_percent = None

    @property
    def ideal_atomic_percent(self):
        return self._ideal_atomic_percent

    @ideal_atomic_percent.setter
    def ideal_atomic_percent(self, ideal_atomic_percent):
        self._ideal_atomic_percent = self._get_object(FieldOperation, ideal_atomic_percent)

    @ideal_atomic_percent.deleter
    def ideal_atomic_percent(self):
        self._ideal_atomic_percent = None
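CompositionQuery above is one property/setter/deleter pattern repeated per field. A minimal standalone sketch of that pattern, with a hypothetical Query class that is not part of citrination_client (the real setters coerce values through self._get_object instead of plain assignment):

class Query(object):
    def __init__(self, element=None):
        self._element = None
        self.element = element  # goes through the setter below

    @property
    def element(self):
        return self._element

    @element.setter
    def element(self, element):
        # CompositionQuery routes this through self._get_object(...) to wrap
        # raw values in operation objects; plain assignment stands in here.
        self._element = element

    @element.deleter
    def element(self):
        self._element = None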
<|file_name|>command.go<|end_file_name|><|fim▁begin|>/* * Copyright (C) 2017 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package cli import ( "errors" "flag" "fmt" "log" "path/filepath" "strconv" "strings" "github.com/chzyer/readline" "github.com/mysteriumnetwork/node/cmd" "github.com/mysteriumnetwork/node/core/service" "github.com/mysteriumnetwork/node/metadata" "github.com/mysteriumnetwork/node/services/noop" "github.com/mysteriumnetwork/node/services/openvpn" openvpn_service "github.com/mysteriumnetwork/node/services/openvpn/service" "github.com/mysteriumnetwork/node/services/wireguard" wireguard_service "github.com/mysteriumnetwork/node/services/wireguard/service" tequilapi_client "github.com/mysteriumnetwork/node/tequilapi/client" "github.com/mysteriumnetwork/node/utils" "github.com/urfave/cli" ) const cliCommandName = "cli" const serviceHelp = `service <action> [args] start <ProviderID> <ServiceType> [options] stop <ServiceID> list status <ServiceID> example: service start 0x7d5ee3557775aed0b85d691b036769c17349db23 openvpn --openvpn.port=1194 --openvpn.proto=UDP` // NewCommand constructs CLI based Mysterium UI with possibility to control quiting func NewCommand() *cli.Command { return &cli.Command{ Name: cliCommandName, Usage: "Starts a CLI client with a Tequilapi", Action: func(ctx *cli.Context) error { nodeOptions := cmd.ParseFlagsNode(ctx) cmdCLI := &cliApp{ historyFile: filepath.Join(nodeOptions.Directories.Data, ".cli_history"), tequilapi: tequilapi_client.NewClient(nodeOptions.TequilapiAddress, nodeOptions.TequilapiPort), } cmd.RegisterSignalCallback(utils.SoftKiller(cmdCLI.Kill)) return cmdCLI.Run() }, } } // cliApp describes CLI based Mysterium UI type cliApp struct { historyFile string tequilapi *tequilapi_client.Client fetchedProposals []tequilapi_client.ProposalDTO completer *readline.PrefixCompleter reader *readline.Instance } const redColor = "\033[31m%s\033[0m" const identityDefaultPassphrase = "" const statusConnected = "Connected" var versionSummary = metadata.VersionAsSummary(metadata.LicenseCopyright( "type 'license --warranty'", "type 'license --conditions'", )) // Run runs CLI interface synchronously, in the same thread while blocking it func (c *cliApp) Run() (err error) { fmt.Println(versionSummary) c.fetchedProposals = c.fetchProposals() c.completer = newAutocompleter(c.tequilapi, c.fetchedProposals) c.reader, err = readline.NewEx(&readline.Config{ Prompt: fmt.Sprintf(redColor, "» "), HistoryFile: c.historyFile, AutoComplete: c.completer, InterruptPrompt: "^C", EOFPrompt: "exit", }) if err != nil { return err } // TODO Should overtake output of CommandRun log.SetOutput(c.reader.Stderr()) for { line, err := c.reader.Readline() if err == readline.ErrInterrupt && len(line) > 0 { continue } else if err != nil { c.quit() return err } c.handleActions(line) } } // Kill stops cli func (c *cliApp) Kill() error { c.reader.Clean() return c.reader.Close() 
} func (c *cliApp) handleActions(line string) { line = strings.TrimSpace(line) staticCmds := []struct { command string handler func() }{ {"exit", c.quit}, {"quit", c.quit}, {"help", c.help}, {"status", c.status}, {"healthcheck", c.healthcheck}, {"ip", c.ip}, {"disconnect", c.disconnect}, {"stop", c.stopClient}, } argCmds := []struct { command string handler func(argsString string) }{ {command: "connect", handler: c.connect}, {command: "unlock", handler: c.unlock}, {command: "identities", handler: c.identities}, {command: "version", handler: c.version}, {command: "license", handler: c.license}, {command: "registration", handler: c.registration}, {command: "proposals", handler: c.proposals}, {command: "service", handler: c.service}, } for _, cmd := range staticCmds { if line == cmd.command { cmd.handler() return } } for _, cmd := range argCmds { if strings.HasPrefix(line, cmd.command) { argsString := strings.TrimSpace(line[len(cmd.command):]) cmd.handler(argsString) return } } if len(line) > 0 { c.help() } } func (c *cliApp) service(argsString string) { args := strings.Fields(argsString) if len(args) == 0 { fmt.Println(serviceHelp) return } action := args[0] switch action { case "start": if len(args) < 3 { fmt.Println(serviceHelp) return } c.serviceStart(args[1], args[2], args[3:]...) case "stop": if len(args) < 2 { fmt.Println(serviceHelp) return } c.serviceStop(args[1]) case "status": if len(args) < 2 { fmt.Println(serviceHelp) return } c.serviceGet(args[1]) case "list": c.serviceList() default: info(fmt.Sprintf("Unknown action provided: %s", action)) fmt.Println(serviceHelp) } } func (c *cliApp) serviceStart(providerID, serviceType string, args ...string) { opts, err := parseServiceOptions(serviceType, args...) if err != nil { info("Failed to parse service options:", err) return } service, err := c.tequilapi.ServiceStart(providerID, serviceType, opts) if err != nil { info("Failed to start service: ", err) return } status(service.Status, "ID: "+service.ID, "ProviderID: "+service.Proposal.ProviderID, "Type: "+service.Proposal.ServiceType) } func (c *cliApp) serviceStop(id string) { if err := c.tequilapi.ServiceStop(id); err != nil { info("Failed to stop service: ", err) return } status("Stopping", "ID: "+id) } func (c *cliApp) serviceList() { services, err := c.tequilapi.Services() if err != nil { info("Failed to get a list of services: ", err) return } for _, service := range services { status(service.Status, "ID: "+service.ID, "ProviderID: "+service.Proposal.ProviderID, "Type: "+service.Proposal.ServiceType) } } func (c *cliApp) serviceGet(id string) { service, err := c.tequilapi.Service(id) if err != nil { info("Failed to get service info: ", err) return } status(service.Status, "ID: "+service.ID, "ProviderID: "+service.Proposal.ProviderID, "Type: "+service.Proposal.ServiceType) } func (c *cliApp) connect(argsString string) { args := strings.Fields(argsString) if len(args) < 3 { info("Please type in the provider identity. 
Connect <consumer-identity> <provider-identity> <service-type> [disable-kill-switch]") return } consumerID, providerID, serviceType := args[0], args[1], args[2] var disableKill bool var err error if len(args) > 3 { disableKillStr := args[3] disableKill, err = strconv.ParseBool(disableKillStr) if err != nil { info("Please use true / false for <disable-kill-switch>") return } } connectOptions := tequilapi_client.ConnectOptions{DisableKillSwitch: disableKill} if consumerID == "new" { id, err := c.tequilapi.NewIdentity(identityDefaultPassphrase) if err != nil { warn(err) return } consumerID = id.Address success("New identity created:", consumerID) } status("CONNECTING", "from:", consumerID, "to:", providerID) _, err = c.tequilapi.Connect(consumerID, providerID, serviceType, connectOptions) if err != nil { warn(err) return } success("Connected.") } func (c *cliApp) unlock(argsString string) { unlockSignature := "Unlock <identity> [passphrase]" if len(argsString) == 0 { info("Press tab to select identity.", unlockSignature) return } args := strings.Fields(argsString) var identity, passphrase string if len(args) == 1 { identity, passphrase = args[0], "" } else if len(args) == 2 { identity, passphrase = args[0], args[1] } else { info("Please type in identity and optional passphrase.", unlockSignature) return } info("Unlocking", identity) err := c.tequilapi.Unlock(identity, passphrase) if err != nil { warn(err) return } success(fmt.Sprintf("Identity %s unlocked.", identity)) } func (c *cliApp) disconnect() { err := c.tequilapi.Disconnect() if err != nil { warn(err) return } success("Disconnected.") } func (c *cliApp) status() { status, err := c.tequilapi.Status() if err != nil { warn(err) } else { info("Status:", status.Status) info("SID:", status.SessionID) } if status.Status == statusConnected { statistics, err := c.tequilapi.ConnectionStatistics() if err != nil { warn(err) } else { info("Proposal:", status.Proposal) info(fmt.Sprintf("Connection duration: %ds", statistics.Duration)) info("Bytes sent:", statistics.BytesSent) info("Bytes received:", statistics.BytesReceived) } } } func (c *cliApp) healthcheck() { healthcheck, err := c.tequilapi.Healthcheck() if err != nil { warn(err) return } info(fmt.Sprintf("Uptime: %v", healthcheck.Uptime)) info(fmt.Sprintf("Process: %v", healthcheck.Process)) info(fmt.Sprintf("Version: %v", healthcheck.Version)) buildString := metadata.FormatString(healthcheck.BuildInfo.Commit, healthcheck.BuildInfo.Branch, healthcheck.BuildInfo.BuildNumber) info(buildString) } func (c *cliApp) proposals(filter string) { proposals := c.fetchProposals() c.fetchedProposals = proposals filterMsg := "" if filter != "" { filterMsg = fmt.Sprintf("(filter: '%s')", filter) } info(fmt.Sprintf("Found %v proposals %s", len(proposals), filterMsg)) for _, proposal := range proposals { country := proposal.ServiceDefinition.LocationOriginate.Country if country == "" { country = "Unknown" } msg := fmt.Sprintf("- provider id: %v, proposal id: %v, country: %v", proposal.ProviderID, proposal.ID, country) if filter == "" || strings.Contains(proposal.ProviderID, filter) || strings.Contains(country, filter) { info(msg) } } } func (c *cliApp) fetchProposals() []tequilapi_client.ProposalDTO { proposals, err := c.tequilapi.Proposals() if err != nil { warn(err) return []tequilapi_client.ProposalDTO{} } return proposals } func (c *cliApp) ip() { ip, err := c.tequilapi.GetIP() if err != nil { warn(err) return } info("IP:", ip) } func (c *cliApp) help() { info("Mysterium CLI tequilapi commands:") 
fmt.Println(c.completer.Tree(" ")) } // quit stops cli and client commands and exits application func (c *cliApp) quit() { stop := utils.SoftKiller(c.Kill) stop() } func (c *cliApp) identities(argsString string) { const usage = "identities command:\n list\n new [passphrase]" if len(argsString) == 0 { info(usage) return } switch argsString { case "new", "list": // Known sub-commands. default: warnf("Unknown sub-command '%s'\n", argsString) fmt.Println(usage) return } args := strings.Fields(argsString) if len(args) < 1 { info(usage) return } action := args[0] if action == "list" { if len(args) > 1 { info(usage) return } ids, err := c.tequilapi.GetIdentities() if err != nil { fmt.Println("Error occurred:", err) return } for _, id := range ids { status("+", id.Address) } return } if action == "new" { var passphrase string if len(args) == 1 { passphrase = identityDefaultPassphrase } else if len(args) == 2 { passphrase = args[1] } else { info(usage) return } id, err := c.tequilapi.NewIdentity(passphrase) if err != nil { warn(err) return } success("New identity created:", id.Address) } } func (c *cliApp) registration(argsString string) { if argsString == "" { warn("Please supply identity") return } status, err := c.tequilapi.IdentityRegistrationStatus(argsString) if err != nil { warn("Something went wrong: ", err) return } if status.Registered { info("Already registered") return } info("Identity is not registered yet. In order to do that - please call payments contract with the following data") info("Public key: part1 ->", status.PublicKey.Part1) info(" part2 ->", status.PublicKey.Part2) info("Signature: S ->", status.Signature.S) info(" R ->", status.Signature.R) info(" V ->", status.Signature.V) info("OR proceed with direct link:") infof(" https://wallet.mysterium.network/?part1=%s&part2=%s&s=%s&r=%s&v=%d\n", status.PublicKey.Part1, status.PublicKey.Part2, status.Signature.S, status.Signature.R, status.Signature.V) } func (c *cliApp) stopClient() { err := c.tequilapi.Stop() if err != nil { warn("Cannot stop client:", err) } success("Client stopped") } func (c *cliApp) version(argsString string) { fmt.Println(versionSummary) } func (c *cliApp) license(argsString string) { if argsString == "warranty" { fmt.Print(metadata.LicenseWarranty) } else if argsString == "conditions" { fmt.Print(metadata.LicenseConditions) } else { info("identities command:\n warranty\n conditions") } } func getIdentityOptionList(tequilapi *tequilapi_client.Client) func(string) []string { return func(line string) []string { identities := []string{"new"} ids, err := tequilapi.GetIdentities() if err != nil { warn(err) return identities } for _, id := range ids { identities = append(identities, id.Address) } return identities } } func getProposalOptionList(proposals []tequilapi_client.ProposalDTO) func(string) []string { return func(line string) []string { var providerIDS []string for _, proposal := range proposals { providerIDS = append(providerIDS, proposal.ProviderID) } return providerIDS } } func newAutocompleter(tequilapi *tequilapi_client.Client, proposals []tequilapi_client.ProposalDTO) *readline.PrefixCompleter { return readline.NewPrefixCompleter( readline.PcItem( "connect", readline.PcItemDynamic( getIdentityOptionList(tequilapi), readline.PcItemDynamic( getProposalOptionList(proposals), ), ), ), readline.PcItem( "service", readline.PcItem("start", readline.PcItemDynamic( getIdentityOptionList(tequilapi), readline.PcItem("noop"), readline.PcItem("openvpn"), readline.PcItem("wireguard"), )), readline.PcItem("stop"), 
readline.PcItem("list"), readline.PcItem("status"), ), readline.PcItem( "identities", readline.PcItem("new"), readline.PcItem("list"), ), readline.PcItem("status"), readline.PcItem("healthcheck"), readline.PcItem("proposals"), readline.PcItem("ip"), readline.PcItem("disconnect"), readline.PcItem("help"), readline.PcItem("quit"), readline.PcItem("stop"), readline.PcItem( "unlock", readline.PcItemDynamic( getIdentityOptionList(tequilapi), ), ), readline.PcItem( "license", readline.PcItem("warranty"), readline.PcItem("conditions"), ), readline.PcItem( "registration", readline.PcItemDynamic( getIdentityOptionList(tequilapi), ), ), ) }<|fim▁hole|> openvpn_service.RegisterFlags(&flags) wireguard_service.RegisterFlags(&flags) set := flag.NewFlagSet("", flag.ContinueOnError) for _, f := range flags { f.Apply(set) } if err := set.Parse(args); err != nil { return nil, err } ctx := cli.NewContext(nil, set, nil) switch serviceType { case noop.ServiceType: return noop.ParseFlags(ctx), nil case wireguard.ServiceType: return wireguard_service.ParseFlags(ctx), nil case openvpn.ServiceType: return openvpn_service.ParseFlags(ctx), nil } return nil, errors.New("service type not found") }<|fim▁end|>
func parseServiceOptions(serviceType string, args ...string) (service.Options, error) { var flags []cli.Flag
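handleActions above dispatches zero-argument commands by exact match and argument-taking commands by prefix match, falling back to help for unrecognized input. The same dispatch pattern sketched in Python (a hypothetical helper, not part of the Mysterium codebase):

def handle_actions(line, static_cmds, arg_cmds):
    """Dispatch a CLI line: exact-match commands first, then prefix commands."""
    line = line.strip()
    if line in static_cmds:  # e.g. "help", "status", "quit"
        static_cmds[line]()
        return
    for name, handler in arg_cmds.items():
        if line.startswith(name):  # e.g. "connect <consumer> <provider> <type>"
            handler(line[len(name):].strip())
            return
    if line:
        static_cmds["help"]()  # unknown input: show the command tree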
<|file_name|>Enemy.java<|end_file_name|>
package sprites;

public class Enemy extends Paddle {

    public Enemy(int x, int y) {
        super(x,y);
    }

    int updateFrameCounter = 0;
    float moveDirection = 0;

    public void update(float dt, Ball ball) {
        //if(++updateFrameCounter%3==0)
        //{
        updateFrameCounter = 0;
        if (position.y < ball.position.y)
            moveDirection = 1;
        else if (position.y > ball.position.y)
            moveDirection = -1;
        else
            moveDirection = 0;
        //}
        setVVelocity(moveDirection, dt);
        position.add(velocity.x, velocity.y);
        bounds.setPosition(position.x, position.y);
    }
}
<|file_name|>fsevents-with-hack.js<|end_file_name|>
var chokidar = require('chokidar');

process.chdir(__dirname + '/../');

chokidar.watch('.', {ignoreInitial: true, useFsEvents: true}).on('all', function(event, path) {
  console.log(new Date, event, path);
});
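The script above logs every change under the parent directory. A rough Python equivalent using the third-party watchdog package (an assumption; chokidar's useFsEvents-specific behavior has no direct analogue in watchdog):

import os
import time
from datetime import datetime

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

class LogAllEvents(FileSystemEventHandler):
    def on_any_event(self, event):
        # Mirrors chokidar's "all" handler: print timestamp, event type, path.
        print(datetime.now(), event.event_type, event.src_path)

if __name__ == "__main__":
    os.chdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
    observer = Observer()
    observer.schedule(LogAllEvents(), ".", recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    finally:
        observer.stop()
        observer.join()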
<|file_name|>int_log.rs<|end_file_name|>
//! This tests the `Integer::{log,log2,log10}` methods. These tests are in a
//! separate file because there's both a large number of them, and not all tests
//! can be run on Android. This is because in Android `log2` uses an imprecise
//! approximation: https://github.com/rust-lang/rust/blob/4825e12fc9c79954aa0fe18f5521efa6c19c7539/src/libstd/sys/unix/android.rs#L27-L53

#[test]
fn checked_log() {
    assert_eq!(999u32.checked_log(10), Some(2));
    assert_eq!(1000u32.checked_log(10), Some(3));
    assert_eq!(555u32.checked_log(13), Some(2));
    assert_eq!(63u32.checked_log(4), Some(2));
    assert_eq!(64u32.checked_log(4), Some(3));
    assert_eq!(10460353203u64.checked_log(3), Some(21));
    assert_eq!(10460353202u64.checked_log(3), Some(20));
    assert_eq!(147808829414345923316083210206383297601u128.checked_log(3), Some(80));
    assert_eq!(147808829414345923316083210206383297600u128.checked_log(3), Some(79));
    assert_eq!(22528399544939174411840147874772641u128.checked_log(19683), Some(8));
    assert_eq!(22528399544939174411840147874772631i128.checked_log(19683), Some(7));

    assert_eq!(0u8.checked_log(4), None);
    assert_eq!(0u16.checked_log(4), None);
    assert_eq!(0i8.checked_log(4), None);
    assert_eq!(0i16.checked_log(4), None);

    for i in i16::MIN..=0 {
        assert_eq!(i.checked_log(4), None);
    }
    for i in 1..=i16::MAX {
        assert_eq!(i.checked_log(13), Some((i as f32).log(13.0) as u32));
    }
    for i in 1..=u16::MAX {
        assert_eq!(i.checked_log(13), Some((i as f32).log(13.0) as u32));
    }
}

#[test]
fn checked_log2() {
    assert_eq!(5u32.checked_log2(), Some(2));
    assert_eq!(0u64.checked_log2(), None);
    assert_eq!(128i32.checked_log2(), Some(7));
    assert_eq!((-55i16).checked_log2(), None);

    assert_eq!(0u8.checked_log2(), None);
    assert_eq!(0u16.checked_log2(), None);
    assert_eq!(0i8.checked_log2(), None);
    assert_eq!(0i16.checked_log2(), None);

    for i in 1..=u8::MAX {
        assert_eq!(i.checked_log2(), Some((i as f32).log2() as u32));
    }
    for i in 1..=u16::MAX {
        // Guard against Android's imprecise f32::log2 implementation.
        if i != 8192 && i != 32768 {
            assert_eq!(i.checked_log2(), Some((i as f32).log2() as u32));
        }
    }
    for i in i8::MIN..=0 {
        assert_eq!(i.checked_log2(), None);
    }
    for i in 1..=i8::MAX {
        assert_eq!(i.checked_log2(), Some((i as f32).log2() as u32));
    }
    for i in i16::MIN..=0 {
        assert_eq!(i.checked_log2(), None);
    }
    for i in 1..=i16::MAX {
        // Guard against Android's imprecise f32::log2 implementation.
        if i != 8192 {
            assert_eq!(i.checked_log2(), Some((i as f32).log2() as u32));
        }
    }
}

// Validate cases that fail on Android's imprecise float log2 implementation.
#[test]
#[cfg(not(target_os = "android"))]
fn checked_log2_not_android() {
    assert_eq!(8192u16.checked_log2(), Some((8192f32).log2() as u32));
    assert_eq!(32768u16.checked_log2(), Some((32768f32).log2() as u32));
    assert_eq!(8192i16.checked_log2(), Some((8192f32).log2() as u32));
}

#[test]
fn checked_log10() {
    assert_eq!(0u8.checked_log10(), None);
    assert_eq!(0u16.checked_log10(), None);
    assert_eq!(0i8.checked_log10(), None);
    assert_eq!(0i16.checked_log10(), None);

    for i in i16::MIN..=0 {
        assert_eq!(i.checked_log10(), None);
    }
    for i in 1..=i16::MAX {
        assert_eq!(i.checked_log10(), Some((i as f32).log10() as u32));
    }
    for i in 1..=u16::MAX {
        assert_eq!(i.checked_log10(), Some((i as f32).log10() as u32));
    }
    for i in 1..=100_000u32 {
        assert_eq!(i.checked_log10(), Some((i as f32).log10() as u32));
    }
}

macro_rules! log10_loop {
    ($T:ty, $log10_max:expr) => {
        assert_eq!(<$T>::MAX.log10(), $log10_max);
        for i in 0..=$log10_max {
            let p = (10 as $T).pow(i as u32);
            if p >= 10 {
                assert_eq!((p - 9).log10(), i - 1);
                assert_eq!((p - 1).log10(), i - 1);
            }
            assert_eq!(p.log10(), i);
            assert_eq!((p + 1).log10(), i);
            if p >= 10 {
                assert_eq!((p + 9).log10(), i);
            }

            // also check `x.log(10)`
            if p >= 10 {
                assert_eq!((p - 9).log(10), i - 1);
                assert_eq!((p - 1).log(10), i - 1);
            }
            assert_eq!(p.log(10), i);
            assert_eq!((p + 1).log(10), i);
            if p >= 10 {
                assert_eq!((p + 9).log(10), i);
            }
        }
    };
}

#[test]
fn log10_u8() {
    log10_loop! { u8, 2 }
}

#[test]
fn log10_u16() {
    log10_loop! { u16, 4 }
}

#[test]
fn log10_u32() {
    log10_loop! { u32, 9 }
}

#[test]
fn log10_u64() {
    log10_loop! { u64, 19 }
}

#[test]
fn log10_u128() {
    log10_loop! { u128, 38 }
}
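The log10_loop macro checks the floor of log10 at every power-of-ten boundary (p - 9, p - 1, p, p + 1, p + 9). The same invariant sketched in Python with an exact all-integer implementation (a hypothetical helper; Rust's checked_log10 is built in, this is just the definition it must satisfy):

def ilog10(n):
    """Floor of log10(n) by repeated division -- exact for any positive int."""
    if n <= 0:
        raise ValueError("log10 of non-positive value")
    out = 0
    while n >= 10:
        n //= 10
        out += 1
    return out

# Same boundary pattern as log10_loop above, for p = 10**1 .. 10**38:
for i in range(1, 39):
    p = 10 ** i
    assert ilog10(p - 1) == i - 1
    assert ilog10(p) == i
    assert ilog10(p + 1) == i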
<|file_name|>console_play.rs<|end_file_name|>
use clap::App;
use std::default::Default;

use thud_game;
use thud_ui_common;
use thud_ui_console;

fn main() {
    let mut agents = thud_ui_common::agent_registry::AgentRegistry::new();
    agents
        .register(Box::new(
            thud_ui_common::agent_registry::StdinAgentBuilder::new(),
        ))
        .register(Box::new(
            thud_ui_common::agent_registry::mcts::MctsAgentBuilder::new("mcts1"),
        ))
        .register(Box::new(
            thud_ui_common::agent_registry::mcts::MctsAgentBuilder::new("mcts2"),
        ))
        .register(Box::new(
            thud_ui_common::agent_registry::FileAgentBuilder::new("file_agent"),
        ));

    // Set up arg handling.
    let matches = {
        let mut app = thud_ui_common::set_args(
            App::new("console_play")
                .version("0.1.0")
                .author("Stu Black <[email protected]>")
                .about("Play Thud on the console"),
            &[
                thud_ui_common::FLAG_INITIAL_BOARD,
                thud_ui_common::FLAG_INITIAL_PLAYER,
                thud_ui_common::FLAG_LOG_LEVEL,
            ],
        );
        app = agents.register_args(app);
        app.get_matches()
    };

    let initial_cells = match matches
        .value_of(thud_ui_common::FLAG_INITIAL_BOARD)
        .map(|x| x.parse::<thud_ui_common::InitialBoard>())
    {
        None => thud_game::board::Cells::default(),
        Some(Ok(x)) => x.cells(),
        Some(Err(e)) => panic!("Bad initial board configuration: {}", e),
    };
    let logging_level = match matches
        .value_of(thud_ui_common::FLAG_LOG_LEVEL)
        .map(|x| x.parse::<log::LevelFilter>())
    {
        Some(Ok(x)) => x,
        Some(Err(_)) => panic!(
            "Bad logging level '{}'",
            matches.value_of(thud_ui_common::FLAG_LOG_LEVEL).unwrap()
        ),
        None => log::LevelFilter::Info,
    };

    // Set up logging.
    thud_ui_common::init::init_logger(logging_level);

    let mut agent1 = agents.get_player_1_from_arguments(&matches).unwrap();
    let mut agent2 = agents.get_player_2_from_arguments(&matches).unwrap();
    let mut state = thud_game::state::State::new(
        initial_cells,
        &thud_game::board::TRANSPOSITIONAL_EQUIVALENCE,
    );
    while !state.terminated() {
        println!("state: {:?}", state);
        let action = agent1.propose_action(&state).unwrap();
        println!("agent 1 proposes action: {:?}", action);
        state.do_action(&action);
        println!("state: {:?}", state);
        if state.terminated() {
            break;
        }
        let action = agent2.propose_action(&state).unwrap();
        println!("agent 2 proposes action: {:?}", action);
        state.do_action(&action);
    }
    println!("state has terminated: {:?}", state);
    println!(
        "final score: dwarfs {}, trolls {}",
        state.score(thud_game::Role::Dwarf),
        state.score(thud_game::Role::Troll)
    );

    // // Prompt for play.
    // loop {
    //     println!(
    //         "{:?} player's turn. Enter coordinate of piece to move.",
    //         human_role
    //     );
    //     let c = thud_ui_console::prompt_for_piece(state.cells(), human_role);
    //     let piece_actions: Vec<thud_game::Action> = state.position_actions(c).collect();
    //     if piece_actions.is_empty() {
    //         println!("Piece at {:?} has no actions.", c);
    //     } else {
    //         if let Some(action) = thud_ui_console::select_one(&piece_actions) {
    //             let mut moved_state = state.clone();
    //             moved_state.do_action(&action);
    //             println!(
    //                 "After action, board: {}",
    //                 thud_game::board::format_board(moved_state.cells())
    //             );
    //             println!("Is this okay?");
    //             match thud_ui_console::select_one(&["y", "n"]) {
    //                 Some(&"y") => {
    //                     state = moved_state;
    //                     graph.retain_reachable_from(&[&state]);
    //                     break;
    //                 }
    //                 _ => continue,
    //             }
    //         }
    //     }
    // }
}
<|file_name|>logs.component.ts<|end_file_name|>
import { Component, AfterViewInit, OnInit, ChangeDetectorRef } from '@angular/core';
import { Title } from '@angular/platform-browser';

import { TdLoadingService, TdMediaService } from '@covalent/core';

import { ItemsService, ProductsService } from '../../services';

@Component({
  selector: 'ag-logs',
  templateUrl: './logs.component.html',
  styleUrls: ['./logs.component.scss'],
  viewProviders: [ ItemsService, ProductsService ],
})
export class LogsComponent implements AfterViewInit, OnInit {

  items: Object[];
  products: Object[];

  constructor(private _titleService: Title,
              private _itemsService: ItemsService,
              private _productsService: ProductsService,
              private _loadingService: TdLoadingService,
              private _changeDetectorRef: ChangeDetectorRef,
              public media: TdMediaService) {
  }

  ngOnInit(): void {
    this._titleService.setTitle( 'Covalent Logs' );
    this._loadingService.register('items.load');
    this._itemsService.query().subscribe((items: Object[]) => {
      this.items = items;
      setTimeout(() => {
        this._loadingService.resolve('items.load');
      }, 2000);
    }, (error: Error) => {
      this._itemsService.staticQuery().subscribe((items: Object[]) => {
        this.items = items;
        setTimeout(() => {
          this._loadingService.resolve('items.load');
        }, 2000);
      });
    });
    this._loadingService.register('products.load');
    this._productsService.query().subscribe((products: Object[]) => {
      this.products = products;
      setTimeout(() => {
        this._loadingService.resolve('products.load');
      }, 2000);
    });
  }

  ngAfterViewInit(): void {
    // broadcast to all listener observables when loading the page
    this.media.broadcast();
    // force a new change detection cycle since change detections
    // have finished when `ngAfterViewInit` is executed
    this._changeDetectorRef.detectChanges();
  }
}

<|file_name|>_hoverlabel.py<|end_file_name|>
import _plotly_utils.basevalidators


class HoverlabelValidator(_plotly_utils.basevalidators.CompoundValidator):
    def __init__(self, plotly_name="hoverlabel", parent_name="scattermapbox", **kwargs):
        super(HoverlabelValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Hoverlabel"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            align
                Sets the horizontal alignment of the text
                content within hover label box. Has an effect
                only if the hover label text spans more two or
                more lines
            alignsrc
                Sets the source reference on Chart Studio Cloud
                for  align .
            bgcolor
                Sets the background color of the hover labels
                for this trace
            bgcolorsrc
                Sets the source reference on Chart Studio Cloud
                for  bgcolor .
            bordercolor
                Sets the border color of the hover labels for
                this trace.
            bordercolorsrc
                Sets the source reference on Chart Studio Cloud
                for  bordercolor .
            font
                Sets the font used in hover labels.
            namelength
                Sets the default length (in number of
                characters) of the trace name in the hover
                labels for all traces. -1 shows the whole name
                regardless of length. 0-3 shows the first 0-3
                characters, and an integer >3 will show the
                whole name if it is less than that many
                characters, but if it is longer, will truncate
                to `namelength - 3` characters and add an
                ellipsis.
            namelengthsrc
                Sets the source reference on Chart Studio Cloud
                for  namelength .
""",
            ),
            **kwargs
        )
<|file_name|>repo.go<|end_file_name|><|fim▁begin|>// Copyright 2014 The Gogs Authors. All rights reserved. // Copyright 2020 The Gitea Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package repo import ( "fmt" "net/url" "os" "path" "strings" "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/auth" "code.gitea.io/gitea/modules/base" "code.gitea.io/gitea/modules/context" "code.gitea.io/gitea/modules/git" "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/migrations" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/structs" "code.gitea.io/gitea/modules/task" "code.gitea.io/gitea/modules/util" repo_service "code.gitea.io/gitea/services/repository" "github.com/unknwon/com" ) const ( tplCreate base.TplName = "repo/create" tplMigrate base.TplName = "repo/migrate" ) // MustBeNotEmpty render when a repo is a empty git dir func MustBeNotEmpty(ctx *context.Context) { if ctx.Repo.Repository.IsEmpty { ctx.NotFound("MustBeNotEmpty", nil) } } // MustBeEditable check that repo can be edited func MustBeEditable(ctx *context.Context) { if !ctx.Repo.Repository.CanEnableEditor() || ctx.Repo.IsViewCommit { ctx.NotFound("", nil) return } } // MustBeAbleToUpload check that repo can be uploaded to func MustBeAbleToUpload(ctx *context.Context) { if !setting.Repository.Upload.Enabled { ctx.NotFound("", nil) } } func checkContextUser(ctx *context.Context, uid int64) *models.User { orgs, err := models.GetOrgsCanCreateRepoByUserID(ctx.User.ID) if err != nil { ctx.ServerError("GetOrgsCanCreateRepoByUserID", err) return nil } if !ctx.User.IsAdmin { orgsAvailable := []*models.User{} for i := 0; i < len(orgs); i++ { if orgs[i].CanCreateRepo() { orgsAvailable = append(orgsAvailable, orgs[i]) } } ctx.Data["Orgs"] = orgsAvailable } else { ctx.Data["Orgs"] = orgs } // Not equal means current user is an organization. if uid == ctx.User.ID || uid == 0 { return ctx.User } org, err := models.GetUserByID(uid) if models.IsErrUserNotExist(err) { return ctx.User } if err != nil { ctx.ServerError("GetUserByID", fmt.Errorf("[%d]: %v", uid, err)) return nil } // Check ownership of organization. if !org.IsOrganization() { ctx.Error(403) return nil } if !ctx.User.IsAdmin { canCreate, err := org.CanCreateOrgRepo(ctx.User.ID) if err != nil { ctx.ServerError("CanCreateOrgRepo", err) return nil } else if !canCreate { ctx.Error(403) return nil } } else { ctx.Data["Orgs"] = orgs } return org } func getRepoPrivate(ctx *context.Context) bool { switch strings.ToLower(setting.Repository.DefaultPrivate) { case setting.RepoCreatingLastUserVisibility: return ctx.User.LastRepoVisibility case setting.RepoCreatingPrivate: return true case setting.RepoCreatingPublic: return false default: return ctx.User.LastRepoVisibility } } // Create render creating repository page func Create(ctx *context.Context) { ctx.Data["Title"] = ctx.Tr("new_repo") // Give default value for template to render. 
ctx.Data["Gitignores"] = models.Gitignores ctx.Data["LabelTemplates"] = models.LabelTemplates ctx.Data["Licenses"] = models.Licenses ctx.Data["Readmes"] = models.Readmes ctx.Data["readme"] = "Default" ctx.Data["private"] = getRepoPrivate(ctx) ctx.Data["IsForcedPrivate"] = setting.Repository.ForcePrivate ctx.Data["default_branch"] = setting.Repository.DefaultBranch ctxUser := checkContextUser(ctx, ctx.QueryInt64("org")) if ctx.Written() { return } ctx.Data["ContextUser"] = ctxUser ctx.Data["repo_template_name"] = ctx.Tr("repo.template_select") templateID := ctx.QueryInt64("template_id") if templateID > 0 { templateRepo, err := models.GetRepositoryByID(templateID) if err == nil && templateRepo.CheckUnitUser(ctxUser.ID, ctxUser.IsAdmin, models.UnitTypeCode) { ctx.Data["repo_template"] = templateID ctx.Data["repo_template_name"] = templateRepo.Name } } if !ctx.User.CanCreateRepo() { ctx.RenderWithErr(ctx.Tr("repo.form.reach_limit_of_creation", ctx.User.MaxCreationLimit()), tplCreate, nil) } else { ctx.HTML(200, tplCreate) } } func handleCreateError(ctx *context.Context, owner *models.User, err error, name string, tpl base.TplName, form interface{}) { switch { case models.IsErrReachLimitOfRepo(err): ctx.RenderWithErr(ctx.Tr("repo.form.reach_limit_of_creation", owner.MaxCreationLimit()), tpl, form) case models.IsErrRepoAlreadyExist(err): ctx.Data["Err_RepoName"] = true ctx.RenderWithErr(ctx.Tr("form.repo_name_been_taken"), tpl, form) case models.IsErrNameReserved(err): ctx.Data["Err_RepoName"] = true ctx.RenderWithErr(ctx.Tr("repo.form.name_reserved", err.(models.ErrNameReserved).Name), tpl, form) case models.IsErrNamePatternNotAllowed(err): ctx.Data["Err_RepoName"] = true ctx.RenderWithErr(ctx.Tr("repo.form.name_pattern_not_allowed", err.(models.ErrNamePatternNotAllowed).Pattern), tpl, form) default: ctx.ServerError(name, err) } } // CreatePost response for creating repository func CreatePost(ctx *context.Context, form auth.CreateRepoForm) { ctx.Data["Title"] = ctx.Tr("new_repo") ctx.Data["Gitignores"] = models.Gitignores ctx.Data["LabelTemplates"] = models.LabelTemplates ctx.Data["Licenses"] = models.Licenses ctx.Data["Readmes"] = models.Readmes ctxUser := checkContextUser(ctx, form.UID) if ctx.Written() { return } ctx.Data["ContextUser"] = ctxUser if ctx.HasError() { ctx.HTML(200, tplCreate) return } var repo *models.Repository var err error if form.RepoTemplate > 0 { opts := models.GenerateRepoOptions{ Name: form.RepoName, Description: form.Description, Private: form.Private, GitContent: form.GitContent, Topics: form.Topics, GitHooks: form.GitHooks, Webhooks: form.Webhooks, Avatar: form.Avatar, IssueLabels: form.Labels, } if !opts.IsValid() { ctx.RenderWithErr(ctx.Tr("repo.template.one_item"), tplCreate, form) return } templateRepo := getRepository(ctx, form.RepoTemplate) if ctx.Written() { return } if !templateRepo.IsTemplate { ctx.RenderWithErr(ctx.Tr("repo.template.invalid"), tplCreate, form) return } repo, err = repo_service.GenerateRepository(ctx.User, ctxUser, templateRepo, opts) if err == nil { log.Trace("Repository generated [%d]: %s/%s", repo.ID, ctxUser.Name, repo.Name) ctx.Redirect(setting.AppSubURL + "/" + ctxUser.Name + "/" + repo.Name) return } } else { repo, err = repo_service.CreateRepository(ctx.User, ctxUser, models.CreateRepoOptions{<|fim▁hole|> Gitignores: form.Gitignores, IssueLabels: form.IssueLabels, License: form.License, Readme: form.Readme, IsPrivate: form.Private || setting.Repository.ForcePrivate, DefaultBranch: form.DefaultBranch, AutoInit: form.AutoInit, }) if 
err == nil { log.Trace("Repository created [%d]: %s/%s", repo.ID, ctxUser.Name, repo.Name) ctx.Redirect(setting.AppSubURL + "/" + ctxUser.Name + "/" + repo.Name) return } } handleCreateError(ctx, ctxUser, err, "CreatePost", tplCreate, &form) } // Migrate render migration of repository page func Migrate(ctx *context.Context) { ctx.Data["Title"] = ctx.Tr("new_migrate") ctx.Data["private"] = getRepoPrivate(ctx) ctx.Data["IsForcedPrivate"] = setting.Repository.ForcePrivate ctx.Data["DisableMirrors"] = setting.Repository.DisableMirrors ctx.Data["mirror"] = ctx.Query("mirror") == "1" ctx.Data["wiki"] = ctx.Query("wiki") == "1" ctx.Data["milestones"] = ctx.Query("milestones") == "1" ctx.Data["labels"] = ctx.Query("labels") == "1" ctx.Data["issues"] = ctx.Query("issues") == "1" ctx.Data["pull_requests"] = ctx.Query("pull_requests") == "1" ctx.Data["releases"] = ctx.Query("releases") == "1" ctx.Data["LFSActive"] = setting.LFS.StartServer ctxUser := checkContextUser(ctx, ctx.QueryInt64("org")) if ctx.Written() { return } ctx.Data["ContextUser"] = ctxUser ctx.HTML(200, tplMigrate) } func handleMigrateError(ctx *context.Context, owner *models.User, err error, name string, tpl base.TplName, form *auth.MigrateRepoForm) { switch { case migrations.IsRateLimitError(err): ctx.RenderWithErr(ctx.Tr("form.visit_rate_limit"), tpl, form) case migrations.IsTwoFactorAuthError(err): ctx.RenderWithErr(ctx.Tr("form.2fa_auth_required"), tpl, form) case models.IsErrReachLimitOfRepo(err): ctx.RenderWithErr(ctx.Tr("repo.form.reach_limit_of_creation", owner.MaxCreationLimit()), tpl, form) case models.IsErrRepoAlreadyExist(err): ctx.Data["Err_RepoName"] = true ctx.RenderWithErr(ctx.Tr("form.repo_name_been_taken"), tpl, form) case models.IsErrNameReserved(err): ctx.Data["Err_RepoName"] = true ctx.RenderWithErr(ctx.Tr("repo.form.name_reserved", err.(models.ErrNameReserved).Name), tpl, form) case models.IsErrNamePatternNotAllowed(err): ctx.Data["Err_RepoName"] = true ctx.RenderWithErr(ctx.Tr("repo.form.name_pattern_not_allowed", err.(models.ErrNamePatternNotAllowed).Pattern), tpl, form) default: remoteAddr, _ := form.ParseRemoteAddr(owner) err = util.URLSanitizedError(err, remoteAddr) if strings.Contains(err.Error(), "Authentication failed") || strings.Contains(err.Error(), "Bad credentials") || strings.Contains(err.Error(), "could not read Username") { ctx.Data["Err_Auth"] = true ctx.RenderWithErr(ctx.Tr("form.auth_failed", err.Error()), tpl, form) } else if strings.Contains(err.Error(), "fatal:") { ctx.Data["Err_CloneAddr"] = true ctx.RenderWithErr(ctx.Tr("repo.migrate.failed", err.Error()), tpl, form) } else { ctx.ServerError(name, err) } } } // MigratePost response for migrating from external git repository func MigratePost(ctx *context.Context, form auth.MigrateRepoForm) { ctx.Data["Title"] = ctx.Tr("new_migrate") ctxUser := checkContextUser(ctx, form.UID) if ctx.Written() { return } ctx.Data["ContextUser"] = ctxUser if ctx.HasError() { ctx.HTML(200, tplMigrate) return } remoteAddr, err := form.ParseRemoteAddr(ctx.User) if err != nil { if models.IsErrInvalidCloneAddr(err) { ctx.Data["Err_CloneAddr"] = true addrErr := err.(models.ErrInvalidCloneAddr) switch { case addrErr.IsURLError: ctx.RenderWithErr(ctx.Tr("form.url_error"), tplMigrate, &form) case addrErr.IsPermissionDenied: ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied"), tplMigrate, &form) case addrErr.IsInvalidPath: ctx.RenderWithErr(ctx.Tr("repo.migrate.invalid_local_path"), tplMigrate, &form) default: ctx.ServerError("Unknown error", err) } } else { 
ctx.ServerError("ParseRemoteAddr", err) } return } var gitServiceType = structs.PlainGitService u, err := url.Parse(form.CloneAddr) if err == nil && strings.EqualFold(u.Host, "github.com") { gitServiceType = structs.GithubService } var opts = migrations.MigrateOptions{ OriginalURL: form.CloneAddr, GitServiceType: gitServiceType, CloneAddr: remoteAddr, RepoName: form.RepoName, Description: form.Description, Private: form.Private || setting.Repository.ForcePrivate, Mirror: form.Mirror && !setting.Repository.DisableMirrors, AuthUsername: form.AuthUsername, AuthPassword: form.AuthPassword, Wiki: form.Wiki, Issues: form.Issues, Milestones: form.Milestones, Labels: form.Labels, Comments: true, PullRequests: form.PullRequests, Releases: form.Releases, } if opts.Mirror { opts.Issues = false opts.Milestones = false opts.Labels = false opts.Comments = false opts.PullRequests = false opts.Releases = false } err = models.CheckCreateRepository(ctx.User, ctxUser, opts.RepoName) if err != nil { handleMigrateError(ctx, ctxUser, err, "MigratePost", tplMigrate, &form) return } err = task.MigrateRepository(ctx.User, ctxUser, opts) if err == nil { ctx.Redirect(setting.AppSubURL + "/" + ctxUser.Name + "/" + opts.RepoName) return } handleMigrateError(ctx, ctxUser, err, "MigratePost", tplMigrate, &form) } // Action response for actions to a repository func Action(ctx *context.Context) { var err error switch ctx.Params(":action") { case "watch": err = models.WatchRepo(ctx.User.ID, ctx.Repo.Repository.ID, true) case "unwatch": err = models.WatchRepo(ctx.User.ID, ctx.Repo.Repository.ID, false) case "star": err = models.StarRepo(ctx.User.ID, ctx.Repo.Repository.ID, true) case "unstar": err = models.StarRepo(ctx.User.ID, ctx.Repo.Repository.ID, false) case "desc": // FIXME: this is not used if !ctx.Repo.IsOwner() { ctx.Error(404) return } ctx.Repo.Repository.Description = ctx.Query("desc") ctx.Repo.Repository.Website = ctx.Query("site") err = models.UpdateRepository(ctx.Repo.Repository, false) } if err != nil { ctx.ServerError(fmt.Sprintf("Action (%s)", ctx.Params(":action")), err) return } ctx.RedirectToFirst(ctx.Query("redirect_to"), ctx.Repo.RepoLink) } // RedirectDownload return a file based on the following infos: func RedirectDownload(ctx *context.Context) { var ( vTag = ctx.Params("vTag") fileName = ctx.Params("fileName") ) tagNames := []string{vTag} curRepo := ctx.Repo.Repository releases, err := models.GetReleasesByRepoIDAndNames(models.DefaultDBContext(), curRepo.ID, tagNames) if err != nil { if models.IsErrAttachmentNotExist(err) { ctx.Error(404) return } ctx.ServerError("RedirectDownload", err) return } if len(releases) == 1 { release := releases[0] att, err := models.GetAttachmentByReleaseIDFileName(release.ID, fileName) if err != nil { ctx.Error(404) return } if att != nil { ctx.Redirect(att.DownloadURL()) return } } ctx.Error(404) } // Download download an archive of a repository func Download(ctx *context.Context) { var ( uri = ctx.Params("*") refName string ext string archivePath string archiveType git.ArchiveType ) switch { case strings.HasSuffix(uri, ".zip"): ext = ".zip" archivePath = path.Join(ctx.Repo.GitRepo.Path, "archives/zip") archiveType = git.ZIP case strings.HasSuffix(uri, ".tar.gz"): ext = ".tar.gz" archivePath = path.Join(ctx.Repo.GitRepo.Path, "archives/targz") archiveType = git.TARGZ default: log.Trace("Unknown format: %s", uri) ctx.Error(404) return } refName = strings.TrimSuffix(uri, ext) if !com.IsDir(archivePath) { if err := os.MkdirAll(archivePath, os.ModePerm); err != nil { 
ctx.ServerError("Download -> os.MkdirAll(archivePath)", err) return } } // Get corresponding commit. var ( commit *git.Commit err error ) gitRepo := ctx.Repo.GitRepo if gitRepo.IsBranchExist(refName) { commit, err = gitRepo.GetBranchCommit(refName) if err != nil { ctx.ServerError("GetBranchCommit", err) return } } else if gitRepo.IsTagExist(refName) { commit, err = gitRepo.GetTagCommit(refName) if err != nil { ctx.ServerError("GetTagCommit", err) return } } else if len(refName) >= 4 && len(refName) <= 40 { commit, err = gitRepo.GetCommit(refName) if err != nil { ctx.NotFound("GetCommit", nil) return } } else { ctx.NotFound("Download", nil) return } archivePath = path.Join(archivePath, base.ShortSha(commit.ID.String())+ext) if !com.IsFile(archivePath) { if err := commit.CreateArchive(archivePath, git.CreateArchiveOpts{ Format: archiveType, Prefix: setting.Repository.PrefixArchiveFiles, }); err != nil { ctx.ServerError("Download -> CreateArchive "+archivePath, err) return } } ctx.ServeFile(archivePath, ctx.Repo.Repository.Name+"-"+refName+ext) } // Status returns repository's status func Status(ctx *context.Context) { task, err := models.GetMigratingTask(ctx.Repo.Repository.ID) if err != nil { ctx.JSON(500, map[string]interface{}{ "err": err, }) return } ctx.JSON(200, map[string]interface{}{ "status": ctx.Repo.Repository.Status, "err": task.Errors, }) }<|fim▁end|>
Name: form.RepoName, Description: form.Description,
<|file_name|>opaque_pointer.rs<|end_file_name|>
#![allow(
    dead_code,
    non_snake_case,
    non_camel_case_types,
    non_upper_case_globals
)]

/// <div rustbindgen opaque></div>
#[repr(C)]
#[repr(align(4))]
#[derive(Debug, Default, Copy, Clone, Hash, PartialEq, Eq)]
pub struct OtherOpaque {
    pub _bindgen_opaque_blob: u32,
}

#[test]
fn bindgen_test_layout_OtherOpaque() {
    assert_eq!(
        ::std::mem::size_of::<OtherOpaque>(),
        4usize,
        concat!("Size of: ", stringify!(OtherOpaque))
    );
    assert_eq!(
        ::std::mem::align_of::<OtherOpaque>(),
        4usize,
        concat!("Alignment of ", stringify!(OtherOpaque))
    );
}

/// <div rustbindgen opaque></div>
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Opaque {
    pub _address: u8,
}

#[repr(C)]
#[derive(Debug, Copy, Clone, Hash, PartialEq)]
pub struct WithOpaquePtr {
    pub whatever: *mut u8,
    pub other: u32,
    pub t: OtherOpaque,
}

#[test]
fn bindgen_test_layout_WithOpaquePtr() {
    assert_eq!(
        ::std::mem::size_of::<WithOpaquePtr>(),
        16usize,
        concat!("Size of: ", stringify!(WithOpaquePtr))
    );
    assert_eq!(
        ::std::mem::align_of::<WithOpaquePtr>(),
        8usize,
        concat!("Alignment of ", stringify!(WithOpaquePtr))
    );
    assert_eq!(
        unsafe {
            &(*(::std::ptr::null::<WithOpaquePtr>())).whatever as *const _
                as usize
        },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(WithOpaquePtr),
            "::",
            stringify!(whatever)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<WithOpaquePtr>())).other as *const _ as usize },
        8usize,
        concat!(
            "Offset of field: ",
            stringify!(WithOpaquePtr),
            "::",
            stringify!(other)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<WithOpaquePtr>())).t as *const _ as usize },
        12usize,
        concat!(
            "Offset of field: ",
            stringify!(WithOpaquePtr),
            "::",
            stringify!(t)
        )
    );
}

impl Default for WithOpaquePtr {
    fn default() -> Self {
        let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
        unsafe {
            ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
            s.assume_init()
        }
    }
}
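bindgen_test_layout_WithOpaquePtr asserts the struct's size, alignment, and per-field offsets by projecting through a null pointer. A rough Python analogue of the same layout checks using ctypes (an illustration, not generated code; it assumes the 64-bit layout the Rust test expects):

import ctypes

class WithOpaquePtr(ctypes.Structure):
    _fields_ = [
        ("whatever", ctypes.c_void_p),  # 8 bytes on 64-bit, offset 0
        ("other", ctypes.c_uint32),     # offset 8
        ("t", ctypes.c_uint32),         # stands in for the OtherOpaque blob, offset 12
    ]

assert ctypes.sizeof(WithOpaquePtr) == 16
assert ctypes.alignment(WithOpaquePtr) == 8
assert WithOpaquePtr.whatever.offset == 0
assert WithOpaquePtr.other.offset == 8
assert WithOpaquePtr.t.offset == 12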
<|file_name|>pakbase.py<|end_file_name|><|fim▁begin|>""" pakbase module This module contains the base package class from which all of the other packages inherit from. """ from __future__ import print_function import os import webbrowser as wb import numpy as np from numpy.lib.recfunctions import stack_arrays from .modflow.mfparbc import ModflowParBc as mfparbc from .utils import Util2d, Util3d, Transient2d, MfList, check class Package(object): """ Base package class from which most other packages are derived. """ def __init__(self, parent, extension='glo', name='GLOBAL', unit_number=1, extra='', filenames=None, allowDuplicates=False): """ Package init """ self.parent = parent # To be able to access the parent modflow object's attributes if (not isinstance(extension, list)): extension = [extension] self.extension = [] self.file_name = [] for idx, e in enumerate(extension): self.extension.append(e) file_name = self.parent.name + '.' + e if filenames is not None: try: if filenames[idx] is not None: file_name = filenames[idx] except: pass self.file_name.append(file_name) self.fn_path = os.path.join(self.parent.model_ws, self.file_name[0]) if (not isinstance(name, list)): name = [name] self.name = name if (not isinstance(unit_number, list)): unit_number = [unit_number] self.unit_number = unit_number if (not isinstance(extra, list)): self.extra = len(self.unit_number) * [extra] else: self.extra = extra self.url = 'index.html' self.allowDuplicates = allowDuplicates self.acceptable_dtypes = [int, np.float32, str] return def __repr__(self): s = self.__doc__ exclude_attributes = ['extension', 'heading', 'name', 'parent', 'url'] for attr, value in sorted(self.__dict__.items()): if not (attr in exclude_attributes): if (isinstance(value, list)): if (len(value) == 1): s = s + ' {0:s} = {1:s}\n'.format(attr, str(value[0])) else: s = s + ' {0:s} (list, items = {1:d}\n'.format(attr, len( value)) elif (isinstance(value, np.ndarray)): s = s + ' {0:s} (array, shape = {1:s})\n'.format(attr, value.shape.__str__()[ 1:-1]) else: s = s + ' {0:s} = {1:s} ({2:s})\n'.format(attr, str(value), str(type(value))[ 7:-2]) return s def __getitem__(self, item): if hasattr(self, 'stress_period_data'): # added this check because stress_period_data also used in Oc and Oc88 but is not a MfList if isinstance(item, MfList): if not isinstance(item, list) and not isinstance(item, tuple): assert item in list( self.stress_period_data.data.keys()), "package.__getitem__() kper " + str( item) + " not in data.keys()" return self.stress_period_data[item] else: if item[1] not in self.dtype.names: raise Exception( "package.__getitem(): item \'" + item + "\' not in dtype names " + str( self.dtype.names)) assert item[0] in list( self.stress_period_data.data.keys()), "package.__getitem__() kper " + str( item[0]) + " not in data.keys()" if self.stress_period_data.vtype[item[0]] == np.recarray: return self.stress_period_data[item[0]][item[1]] def __setitem__(self, key, value): raise NotImplementedError("package.__setitem__() not implemented") def __setattr__(self, key, value): var_dict = vars(self) if key in list(var_dict.keys()): old_value = var_dict[key] if isinstance(old_value, Util2d): value = Util2d(self.parent, old_value.shape, old_value.dtype, value, name=old_value.name, fmtin=old_value.format.fortran, locat=old_value.locat, array_free_format=old_value.format.array_free_format) elif isinstance(old_value, Util3d): value = Util3d(self.parent, old_value.shape, old_value.dtype, value, name=old_value.name_base, fmtin=old_value.fmtin, 
locat=old_value.locat, array_free_format=old_value.array_free_format) elif isinstance(old_value, Transient2d): value = Transient2d(self.parent, old_value.shape, old_value.dtype, value, name=old_value.name_base, fmtin=old_value.fmtin, locat=old_value.locat) elif isinstance(old_value, MfList): value = MfList(self, dtype=old_value.dtype, data=value) elif isinstance(old_value, list): if len(old_value) > 0: if isinstance(old_value[0], Util3d): new_list = [] for vo, v in zip(old_value, value): new_list.append(Util3d(self.parent, vo.shape, vo.dtype, v, name=vo.name_base, fmtin=vo.fmtin, locat=vo.locat)) value = new_list elif isinstance(old_value[0], Util2d): new_list = [] for vo, v in zip(old_value, value): new_list.append(Util2d(self.parent, vo.shape, vo.dtype, v, name=vo.name, fmtin=vo.fmtin, locat=vo.locat)) value = new_list super(Package, self).__setattr__(key, value) def export(self, f, **kwargs): from flopy import export return export.utils.package_helper(f, self, **kwargs) @staticmethod def add_to_dtype(dtype, field_names, field_types): if not isinstance(field_names, list): field_names = [field_names] if not isinstance(field_types, list): field_types = [field_types] * len(field_names) newdtypes = [dtype] for field_name, field_type in zip(field_names, field_types): tempdtype = np.dtype([(field_name, field_type)]) newdtypes.append(tempdtype) newdtype = sum((dtype.descr for dtype in newdtypes), []) newdtype = np.dtype(newdtype) return newdtype def check(self, f=None, verbose=True, level=1): """ Check package data for common errors. Parameters ---------- f : str or file handle String defining file name or file handle for summary file of check method output. If a sting is passed a file handle is created. If f is None, check method does not write results to a summary file. (default is None) verbose : bool Boolean flag used to determine if check method results are written to the screen level : int Check method analysis level. If level=0, summary checks are performed. If level=1, full checks are performed. 
Returns ------- None Examples -------- >>> import flopy >>> m = flopy.modflow.Modflow.load('model.nam') >>> m.dis.check() """ chk = None if self.__dict__.get('stress_period_data', None) is not None and \ self.name[0] != 'OC': spd_inds_valid = True chk = check(self, f=f, verbose=verbose, level=level) for per in self.stress_period_data.data.keys(): if isinstance(self.stress_period_data.data[per], np.recarray): spd = self.stress_period_data.data[per] inds = (spd.k, spd.i, spd.j) if self.parent.structured \ else (spd.node) # General BC checks # check for valid cell indices spd_inds_valid = chk._stress_period_data_valid_indices(spd) # first check for and list nan values chk._stress_period_data_nans(spd) if spd_inds_valid: # next check for BCs in inactive cells chk._stress_period_data_inactivecells(spd) # More specific BC checks # check elevations in the ghb, drain, and riv packages if self.name[0] in check.bc_stage_names.keys(): # check that bc elevations are above model cell bottoms # also checks for nan values elev_name = chk.bc_stage_names[self.name[0]] botms = self.parent.dis.botm.array[inds] chk.stress_period_data_values(spd, spd[elev_name] < botms, col=elev_name, error_name='BC elevation below cell bottom', error_type='Error') chk.summarize() # check property values in upw and lpf packages elif self.name[0] in ['UPW', 'LPF']: chk = check(self, f=f, verbose=verbose, level=level) active = chk.get_active() # check for confined layers above convertable layers confined = False thickstrt = False for option in self.options: if option.lower() == 'thickstrt': thickstrt = True for i, l in enumerate(self.laytyp.array.tolist()): if l == 0 or l < 0 and thickstrt: confined = True continue if confined and l > 0: chk._add_to_summary(type='Warning', desc='\r LAYTYP: unconfined (convertible) ' + \ 'layer below confined layer') # check for zero or negative values of hydraulic conductivity, anisotropy, # and quasi-3D confining beds kparams = {'hk': 'horizontal hydraulic conductivity', 'vka': 'vertical hydraulic conductivity'} for kp, name in kparams.items(): chk.values(self.__dict__[kp].array, active & (self.__dict__[kp].array <= 0), 'zero or negative {} values'.format(name), 'Error') # check for negative hani chk.values(self.__dict__['hani'].array, active & (self.__dict__['hani'].array < 0), 'negative horizontal anisotropy values', 'Error') def check_thresholds(array, active, thresholds, name): """Checks array against min and max threshold values.""" mn, mx = thresholds chk.values(array, active & (array < mn), '{} values below checker threshold of {}' .format(name, mn), 'Warning') chk.values(array, active & (array > mx), '{} values above checker threshold of {}' .format(name, mx), 'Warning') # check for unusually high or low values of hydraulic conductivity if self.layvka.sum() > 0: # convert vertical anistropy to Kv for checking vka = self.vka.array.copy() for l in range(vka.shape[0]): vka[l] *= self.hk.array[l] if self.layvka.array[ l] != 0 else 1 check_thresholds(vka, active, chk.property_threshold_values['vka'], kparams.pop('vka')) for kp, name in kparams.items(): check_thresholds(self.__dict__[kp].array, active, chk.property_threshold_values[kp], name) # check vkcb if there are any quasi-3D layers if self.parent.dis.laycbd.sum() > 0: # pad non-quasi-3D layers in vkcb array with ones so they won't fail checker vkcb = self.vkcb.array.copy() for l in range(self.vkcb.shape[0]): if self.parent.dis.laycbd[l] == 0: vkcb[l, :, :] = 1 # assign 1 instead of zero as default value that won't violate checker # 
(allows for same structure as other checks) chk.values(vkcb, active & (vkcb <= 0), 'zero or negative quasi-3D confining bed Kv values', 'Error') check_thresholds(vkcb, active, chk.property_threshold_values['vkcb'], 'quasi-3D confining bed Kv') if not np.all(self.parent.dis.steady): # only check storage if model is transient # do the same for storage if the model is transient sarrays = {'ss': self.ss.array, 'sy': self.sy.array} if 'STORAGECOEFFICIENT' in self.options: # convert to specific for checking chk._add_to_summary(type='Warning', desc='\r STORAGECOEFFICIENT option is activated, \ storage values are read storage coefficients') sarrays['ss'] /= self.parent.dis.thickness.array sarrays['sy'] /= self.parent.dis.thickness.array chk.values(sarrays['ss'], active & (sarrays['ss'] < 0), 'zero or negative specific storage values', 'Error') check_thresholds(sarrays['ss'], active, chk.property_threshold_values['ss'], 'specific storage') # only check specific yield for convertible layers inds = np.array( [True if l > 0 or l < 0 and 'THICKSRT' in self.options else False for l in self.laytyp]) sarrays['sy'] = sarrays['sy'][inds, :, :] active = active[inds, :, :] chk.values(sarrays['sy'], active & (sarrays['sy'] < 0), 'zero or negative specific yield values', 'Error') check_thresholds(sarrays['sy'], active, chk.property_threshold_values['sy'], 'specific yield') chk.summarize() else: txt = 'check method not implemented for {} Package.'.format( self.name[0]) if f is not None: if isinstance(f, str): pth = os.path.join(self.parent.model_ws, f) f = open(pth, 'w') f.write(txt) f.close() if verbose: print(txt) return chk def level1_arraylist(self, idx, v, name, txt): ndim = v.ndim if ndim == 3: kon = -1 for [k, i, j] in idx: if k > kon: kon = k txt += ' {:>10s}{:>10s}{:>10s}{:>15s}\n'.format('layer', 'row', 'column', name[ k].lower().replace( ' layer ', '')) txt += ' {:10d}{:10d}{:10d}{:15.7g}\n'.format(k + 1, i + 1, j + 1, v[k, i, j]) elif ndim == 2: txt += ' {:>10s}{:>10s}{:>15s}\n'.format('row', 'column', name[ 0].lower().replace( ' layer ', '')) for [i, j] in idx: txt += ' {:10d}{:10d}{:15.7g}\n'.format(i + 1, j + 1, v[i, j]) elif ndim == 1: txt += ' {:>10s}{:>15s}\n'.format('number', name[0]) for i in idx: txt += ' {:10d}{:15.7g}\n'.format(i + 1, v[i]) return txt def plot(self, **kwargs): """ Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) package input data Parameters ---------- **kwargs : dict filename_base : str Base file name that will be used to automatically generate file names for output image files. Plots will be exported as image files if file_name_base is not None. (default is None) file_extension : str Valid matplotlib.pyplot file extension for savefig(). Only used if filename_base is not None. (default is 'png') mflay : int MODFLOW zero-based layer number to return. If None, then all all layers will be included. (default is None) kper : int MODFLOW zero-based stress period number to return. (default is zero) key : str MfList dictionary key. (default is None) Returns ---------- axes : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis are returned. 
See Also -------- Notes ----- Examples -------- >>> import flopy >>> ml = flopy.modflow.Modflow.load('test.nam') >>> ml.dis.plot() """ # valid keyword arguments if 'kper' in kwargs: kper = kwargs.pop('kper') else: kper = 0 if 'filename_base' in kwargs: fileb = kwargs.pop('filename_base') else: fileb = None if 'mflay' in kwargs: mflay = kwargs.pop('mflay') else: mflay = None if 'file_extension' in kwargs: fext = kwargs.pop('file_extension') fext = fext.replace('.', '') else: fext = 'png' if 'key' in kwargs: key = kwargs.pop('key') else: key = None if 'initial_fig' in kwargs: ifig = int(kwargs.pop('initial_fig')) else: ifig = 0 inc = self.parent.nlay if mflay is not None: inc = 1 axes = [] for item, value in self.__dict__.items(): caxs = [] if isinstance(value, MfList): if self.parent.verbose: print('plotting {} package MfList instance: {}'.format( self.name[0], item)) if key is None: names = ['{} location stress period {} layer {}'.format( self.name[0], kper + 1, k + 1) for k in range(self.parent.nlay)] colorbar = False else: names = ['{} {} data stress period {} layer {}'.format( self.name[0], key, kper + 1, k + 1) for k in range(self.parent.nlay)] colorbar = True fignum = list(range(ifig, ifig + inc)) ifig = fignum[-1] + 1 caxs.append(value.plot(key, names, kper, filename_base=fileb, file_extension=fext, mflay=mflay, fignum=fignum, colorbar=colorbar, **kwargs)) elif isinstance(value, Util3d): if self.parent.verbose: print('plotting {} package Util3d instance: {}'.format( self.name[0], item)) # fignum = list(range(ifig, ifig + inc)) fignum = list(range(ifig, ifig + value.shape[0])) ifig = fignum[-1] + 1 caxs.append( value.plot(filename_base=fileb, file_extension=fext, mflay=mflay, fignum=fignum, colorbar=True)) elif isinstance(value, Util2d): if len(value.shape) == 2: if self.parent.verbose: print('plotting {} package Util2d instance: {}'.format( self.name[0], item)) fignum = list(range(ifig, ifig + 1)) ifig = fignum[-1] + 1 caxs.append( value.plot(filename_base=fileb, file_extension=fext, fignum=fignum, colorbar=True)) elif isinstance(value, Transient2d): if self.parent.verbose: print( 'plotting {} package Transient2d instance: {}'.format( self.name[0], item)) fignum = list(range(ifig, ifig + inc)) ifig = fignum[-1] + 1 caxs.append( value.plot(filename_base=fileb, file_extension=fext, kper=kper, fignum=fignum, colorbar=True)) elif isinstance(value, list): for v in value: if isinstance(v, Util3d): if self.parent.verbose: print( 'plotting {} package Util3d instance: {}'.format( self.name[0], item)) fignum = list(range(ifig, ifig + inc)) ifig = fignum[-1] + 1 caxs.append( v.plot(filename_base=fileb, file_extension=fext, mflay=mflay, fignum=fignum, colorbar=True)) else: pass # unroll nested lists os axes into a single list of axes if isinstance(caxs, list): for c in caxs: if isinstance(c, list): for cc in c: axes.append(cc) else: axes.append(c) else: axes.append(caxs) return axes def to_shapefile(self, filename, **kwargs): """ Export 2-D, 3-D, and transient 2-D model data to shapefile (polygons). Adds an attribute for each layer in each data array Parameters ---------- filename : str Shapefile name to write Returns ---------- None See Also -------- Notes ----- Examples -------- >>> import flopy >>> ml = flopy.modflow.Modflow.load('test.nam') >>> ml.lpf.to_shapefile('test_hk.shp') """ import warnings warnings.warn("to_shapefile() is deprecated. 
use .export()") self.export(filename) def webdoc(self): if self.parent.version == 'mf2k': wb.open( 'http://water.usgs.gov/nrp/gwsoftware/modflow2000/Guide/' + self.url) elif self.parent.version == 'mf2005': wb.open( 'http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/' + self.url) elif self.parent.version == 'ModflowNwt': wb.open( 'http://water.usgs.gov/ogw/modflow-nwt/MODFLOW-NWT-Guide/' + self.url) def write_file(self, check=False): """ Every Package needs its own write_file function """ print('IMPLEMENTATION ERROR: write_file must be overloaded') return @staticmethod def load(model, pack_type, f, nper=None, pop_key_list=None, check=True, unitnumber=None, ext_unit_dict=None): """ The load method has not been implemented for this package. """ bc_pack_types = [] if not hasattr(f, 'read'): filename = f f = open(filename, 'r') # dataset 0 -- header while True: line = f.readline() if line[0] != '#': break # check for parameters nppak = 0 if "parameter" in line.lower(): t = line.strip().split() # assert int(t[1]) == 0,"Parameters are not supported" nppak = np.int(t[1]) mxl = 0 if nppak > 0: mxl = np.int(t[2]) if model.verbose: print(' Parameters detected. Number of parameters = ', nppak) line = f.readline() # dataset 2a t = line.strip().split() ipakcb = 0 try: ipakcb = int(t[1]) except: pass options = [] aux_names = [] if len(t) > 2: it = 2 while it < len(t): toption = t[it] if toption.lower() is 'noprint': options.append(toption) elif 'aux' in toption.lower(): options.append(' '.join(t[it:it + 2])) aux_names.append(t[it + 1].lower()) it += 1 it += 1 # set partype # and read phiramp for modflow-nwt well package partype = ['cond'] if "modflowwel" in str(pack_type).lower(): partype = ['flux'] if "nwt" in model.version.lower() and 'flopy.modflow.mfwel.modflowwel'.lower() in str(pack_type).lower(): specify = False ipos = f.tell() line = f.readline() # test for specify keyword if a NWT well file - This is a temporary hack if 'specify' in line.lower(): specify = True t = line.strip().split() phiramp = np.float32(t[1]) try: phiramp_unit = np.int32(t[2]) except: phiramp_unit = 2 options.append('specify {} {} '.format(phiramp, phiramp_unit)) else: f.seek(ipos) elif 'flopy.modflow.mfchd.modflowchd'.lower() in str( pack_type).lower(): partype = ['shead', 'ehead'] # read parameter data if nppak > 0: dt = pack_type.get_empty(1, aux_names=aux_names, structured=model.structured).dtype pak_parms = mfparbc.load(f, nppak, dt, model.verbose) # pak_parms = mfparbc.load(f, nppak, len(dt.names)) if nper is None: nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() # read data for every stress period bnd_output = None stress_period_data = {} for iper in range(nper): if model.verbose: print( " loading " + str( pack_type) + " for kper {0:5d}".format( iper + 1)) line = f.readline() if line == '': break t = line.strip().split() itmp = int(t[0]) itmpp = 0 try: itmpp = int(t[1]) except: pass if itmp == 0: bnd_output = None current = pack_type.get_empty(itmp, aux_names=aux_names, structured=model.structured) elif itmp > 0: current = pack_type.get_empty(itmp, aux_names=aux_names, structured=model.structured) for ibnd in range(itmp): line = f.readline() if "open/close" in line.lower(): # need to strip out existing path seps and # replace current-system path seps raw = line.strip().split() fname = raw[1] if '/' in fname: raw = fname.split('/') elif '\\' in fname: raw = fname.split('\\') else: raw = [fname] fname = os.path.join(*raw) oc_filename = os.path.join(model.model_ws, fname) assert os.path.exists( oc_filename), 
"Package.load() error: open/close filename " + \ oc_filename + " not found" try: current = np.genfromtxt(oc_filename, dtype=current.dtype) current = current.view(np.recarray) except Exception as e: raise Exception( "Package.load() error loading open/close file " + oc_filename + \ " :" + str(e)) assert current.shape[ 0] == itmp, "Package.load() error: open/close rec array from file " + \ oc_filename + " shape (" + str( current.shape) + \ ") does not match itmp: {0:d}".format( itmp) break try: t = line.strip().split() current[ibnd] = tuple(t[:len(current.dtype.names)]) except: t = [] for ivar in range(len(current.dtype.names)): istart = ivar * 10 istop = istart + 10 t.append(line[istart:istop]) current[ibnd] = tuple(t[:len(current.dtype.names)]) # convert indices to zero-based if model.structured: current['k'] -= 1 current['i'] -= 1 current['j'] -= 1 else: current['node'] -= 1 bnd_output = np.recarray.copy(current) else: bnd_output = np.recarray.copy(current) for iparm in range(itmpp): line = f.readline() t = line.strip().split() pname = t[0].lower() iname = 'static' try: tn = t[1] c = tn.lower() instance_dict = pak_parms.bc_parms[pname][1] if c in instance_dict: iname = c else: iname = 'static' except: pass par_dict, current_dict = pak_parms.get(pname) data_dict = current_dict[iname] par_current = pack_type.get_empty(par_dict['nlst'], aux_names=aux_names) # get appropriate parval if model.mfpar.pval is None: parval = np.float(par_dict['parval']) else: try: parval = np.float(model.mfpar.pval.pval_dict[pname]) except: parval = np.float(par_dict['parval']) # fill current parameter data (par_current) for ibnd, t in enumerate(data_dict): par_current[ibnd] = tuple(t[:len(par_current.dtype.names)]) if model.structured: par_current['k'] -= 1 par_current['i'] -= 1 par_current['j'] -= 1 else: par_current['node'] -= 1 for ptype in partype: par_current[ptype] *= parval if bnd_output is None: bnd_output = np.recarray.copy(par_current) else: bnd_output = stack_arrays((bnd_output, par_current), asrecarray=True, usemask=False) if bnd_output is None: stress_period_data[iper] = itmp else: stress_period_data[iper] = bnd_output dtype = pack_type.get_empty(0, aux_names=aux_names, structured=model.structured).dtype # set package unit number unitnumber = None filenames = [None, None] if ext_unit_dict is not None: unitnumber, filenames[0] = \ model.get_ext_dict_attr(ext_unit_dict, filetype=pack_type.ftype()) if ipakcb > 0: iu, filenames[1] = \ model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) model.add_pop_key_list(ipakcb) pak = pack_type(model, ipakcb=ipakcb, stress_period_data=stress_period_data,<|fim▁hole|> dtype=dtype, options=options, unitnumber=unitnumber, filenames=filenames) if check: pak.check(f='{}.chk'.format(pak.name[0]), verbose=pak.parent.verbose, level=0) return pak<|fim▁end|>
<|file_name|>imdb.py<|end_file_name|><|fim▁begin|># Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ IMDB dataset. This module downloads IMDB dataset from http://ai.stanford.edu/%7Eamaas/data/sentiment/. This dataset contains a set of 25,000 highly polar movie reviews for training, and 25,000 for testing. Besides, this module also provides API for building dictionary. """ from __future__ import print_function import paddle.dataset.common import collections import tarfile import re import string import six __all__ = ['build_dict', 'train', 'test'] #URL = 'http://ai.stanford.edu/%7Eamaas/data/sentiment/aclImdb_v1.tar.gz' URL = 'https://dataset.bj.bcebos.com/imdb%2FaclImdb_v1.tar.gz' MD5 = '7c2ac02c03563afcf9b574c7e56c153a' def tokenize(pattern): """ Read files that match the given pattern. Tokenize and yield each file. """ with tarfile.open(paddle.dataset.common.download(URL, 'imdb', MD5)) as tarf: # Note that we should use tarfile.next(), which does # sequential access of member files, other than # tarfile.extractfile, which does random access and might # destroy hard disks. tf = tarf.next() while tf != None: if bool(pattern.match(tf.name)): # newline and punctuations removal and ad-hoc tokenization. yield tarf.extractfile(tf).read().rstrip(six.b( "\n\r")).translate( None, six.b(string.punctuation)).lower().split() tf = tarf.next() def build_dict(pattern, cutoff): """ Build a word dictionary from the corpus. Keys of the dictionary are words, and values are zero-based IDs of these words. """ word_freq = collections.defaultdict(int) for doc in tokenize(pattern): for word in doc: word_freq[word] += 1 # Not sure if we should prune less-frequent words here. word_freq = [x for x in six.iteritems(word_freq) if x[1] > cutoff] dictionary = sorted(word_freq, key=lambda x: (-x[1], x[0])) words, _ = list(zip(*dictionary)) word_idx = dict(list(zip(words, six.moves.range(len(words))))) word_idx['<unk>'] = len(words) return word_idx def reader_creator(pos_pattern, neg_pattern, word_idx): UNK = word_idx['<unk>'] INS = [] def load(pattern, out, label): for doc in tokenize(pattern): out.append(([word_idx.get(w, UNK) for w in doc], label)) load(pos_pattern, INS, 0) load(neg_pattern, INS, 1) def reader(): for doc, label in INS: yield doc, label return reader def train(word_idx): """ IMDB training set creator. It returns a reader creator, each sample in the reader is an zero-based ID sequence and label in [0, 1]. :param word_idx: word dictionary :type word_idx: dict :return: Training reader creator :rtype: callable """ return reader_creator( re.compile("aclImdb/train/pos/.*\.txt$"), re.compile("aclImdb/train/neg/.*\.txt$"), word_idx) def test(word_idx): """ IMDB test set creator. It returns a reader creator, each sample in the reader is an zero-based ID sequence and label in [0, 1]. 
:param word_idx: word dictionary :type word_idx: dict :return: Test reader creator :rtype: callable """ return reader_creator( re.compile("aclImdb/test/pos/.*\.txt$"), re.compile("aclImdb/test/neg/.*\.txt$"), word_idx) def word_dict():<|fim▁hole|> """ Build a word dictionary from the corpus. :return: Word dictionary :rtype: dict """ return build_dict( re.compile("aclImdb/((train)|(test))/((pos)|(neg))/.*\.txt$"), 150) def fetch(): paddle.dataset.common.download(URL, 'imdb', MD5)<|fim▁end|>
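# A minimal usage sketch for the reader API in the row above, assuming the
# module is importable as paddle.dataset.imdb (an assumption, not part of the
# corpus) and that the dataset download succeeds:
import paddle.dataset.imdb as imdb

word_idx = imdb.word_dict()          # vocabulary built with a frequency cutoff of 150
train_reader = imdb.train(word_idx)  # reader creator: a callable returning a generator
for doc, label in train_reader():
    # doc is a list of zero-based word IDs; label is 0 (positive) or 1 (negative)
    print(len(doc), label)
    break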
<|file_name|>node-inspector.js<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
/** * Dummy file for grunt-nodemon to run node-inspector task */
<|file_name|>SFContentBlockerManager.java<|end_file_name|><|fim▁begin|>/* Copyright 2014-2016 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package apple.safariservices; import apple.NSObject; import apple.foundation.NSArray; import apple.foundation.NSError; import apple.foundation.NSMethodSignature; import apple.foundation.NSSet; import org.moe.natj.c.ann.FunctionPtr; import org.moe.natj.general.NatJ; import org.moe.natj.general.Pointer; import org.moe.natj.general.ann.Generated; import org.moe.natj.general.ann.Library; import org.moe.natj.general.ann.Mapped; import org.moe.natj.general.ann.NInt; import org.moe.natj.general.ann.NUInt; import org.moe.natj.general.ann.Owned; import org.moe.natj.general.ann.Runtime; import org.moe.natj.general.ptr.VoidPtr; import org.moe.natj.objc.Class; import org.moe.natj.objc.ObjCRuntime; import org.moe.natj.objc.SEL; import org.moe.natj.objc.ann.ObjCBlock; import org.moe.natj.objc.ann.ObjCClassBinding; import org.moe.natj.objc.ann.Selector; import org.moe.natj.objc.map.ObjCObjectMapper; @Generated @Library("SafariServices") @Runtime(ObjCRuntime.class) @ObjCClassBinding public class SFContentBlockerManager extends NSObject { static { NatJ.register(); } @Generated protected SFContentBlockerManager(Pointer peer) { super(peer); } @Generated @Selector("accessInstanceVariablesDirectly") public static native boolean accessInstanceVariablesDirectly(); @Generated @Owned @Selector("alloc") public static native SFContentBlockerManager alloc(); @Owned @Generated @Selector("allocWithZone:") public static native SFContentBlockerManager allocWithZone(VoidPtr zone); @Generated @Selector("automaticallyNotifiesObserversForKey:") public static native boolean automaticallyNotifiesObserversForKey(String key); @Generated @Selector("cancelPreviousPerformRequestsWithTarget:") public static native void cancelPreviousPerformRequestsWithTarget(@Mapped(ObjCObjectMapper.class) Object aTarget); @Generated @Selector("cancelPreviousPerformRequestsWithTarget:selector:object:") public static native void cancelPreviousPerformRequestsWithTargetSelectorObject( @Mapped(ObjCObjectMapper.class) Object aTarget, SEL aSelector, @Mapped(ObjCObjectMapper.class) Object anArgument); @Generated @Selector("classFallbacksForKeyedArchiver") public static native NSArray<String> classFallbacksForKeyedArchiver(); @Generated @Selector("classForKeyedUnarchiver") public static native Class classForKeyedUnarchiver(); @Generated @Selector("debugDescription") public static native String debugDescription_static(); @Generated<|fim▁hole|> public static native String description_static(); @Generated @Selector("getStateOfContentBlockerWithIdentifier:completionHandler:") public static native void getStateOfContentBlockerWithIdentifierCompletionHandler(String identifier, @ObjCBlock(name = "call_getStateOfContentBlockerWithIdentifierCompletionHandler") Block_getStateOfContentBlockerWithIdentifierCompletionHandler completionHandler); @Generated @Selector("hash") @NUInt public static native long hash_static(); @Generated 
@Selector("instanceMethodForSelector:") @FunctionPtr(name = "call_instanceMethodForSelector_ret") public static native NSObject.Function_instanceMethodForSelector_ret instanceMethodForSelector(SEL aSelector); @Generated @Selector("instanceMethodSignatureForSelector:") public static native NSMethodSignature instanceMethodSignatureForSelector(SEL aSelector); @Generated @Selector("instancesRespondToSelector:") public static native boolean instancesRespondToSelector(SEL aSelector); @Generated @Selector("isSubclassOfClass:") public static native boolean isSubclassOfClass(Class aClass); @Generated @Selector("keyPathsForValuesAffectingValueForKey:") public static native NSSet<String> keyPathsForValuesAffectingValueForKey(String key); @Generated @Owned @Selector("new") public static native SFContentBlockerManager new_objc(); @Generated @Selector("reloadContentBlockerWithIdentifier:completionHandler:") public static native void reloadContentBlockerWithIdentifierCompletionHandler(String identifier, @ObjCBlock(name = "call_reloadContentBlockerWithIdentifierCompletionHandler") Block_reloadContentBlockerWithIdentifierCompletionHandler completionHandler); @Generated @Selector("resolveClassMethod:") public static native boolean resolveClassMethod(SEL sel); @Generated @Selector("resolveInstanceMethod:") public static native boolean resolveInstanceMethod(SEL sel); @Generated @Selector("setVersion:") public static native void setVersion_static(@NInt long aVersion); @Generated @Selector("superclass") public static native Class superclass_static(); @Generated @Selector("version") @NInt public static native long version_static(); @Generated @Selector("init") public native SFContentBlockerManager init(); @Runtime(ObjCRuntime.class) @Generated public interface Block_getStateOfContentBlockerWithIdentifierCompletionHandler { @Generated void call_getStateOfContentBlockerWithIdentifierCompletionHandler(SFContentBlockerState state, NSError error); } @Runtime(ObjCRuntime.class) @Generated public interface Block_reloadContentBlockerWithIdentifierCompletionHandler { @Generated void call_reloadContentBlockerWithIdentifierCompletionHandler(NSError error); } }<|fim▁end|>
@Selector("description")
<|file_name|>pisa_util.py<|end_file_name|><|fim▁begin|># -*- coding: ISO-8859-1 -*- # Copyright 2010 Dirk Holtwick, holtwick.it # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __reversion__ = "$Revision: 20 $" __author__ = "$Author: holtwick $" __date__ = "$Date: 2007-10-09 12:58:24 +0200 (Di, 09 Okt 2007) $" from reportlab.lib.units import inch, cm from reportlab.lib.styles import * from reportlab.lib.enums import * from reportlab.lib.colors import * from reportlab.lib.pagesizes import * from reportlab.pdfbase import pdfmetrics # from reportlab.platypus import * # from reportlab.platypus.flowables import Flowable # from reportlab.platypus.tableofcontents import TableOfContents # from reportlab.platypus.para import Para, PageNumberObject, UNDERLINE, HotLink import reportlab import copy import types import os import os.path import pprint import sys import string import re import base64 import urlparse import mimetypes import urllib2 import urllib import httplib import tempfile import shutil rgb_re = re.compile("^.*?rgb[(]([0-9]+).*?([0-9]+).*?([0-9]+)[)].*?[ ]*$") _reportlab_version = tuple(map(int, reportlab.Version.split('.'))) if _reportlab_version < (2,1): raise ImportError("Reportlab Version 2.1+ is needed!") REPORTLAB22 = _reportlab_version >= (2, 2) #if not(reportlab.Version[0] == "2" and reportlab.Version[2] >= "1"): # raise ImportError("Reportlab Version 2.1+ is needed!") # #REPORTLAB22 = (reportlab.Version[0] == "2" and reportlab.Version[2] >= "2") # print "***", reportlab.Version, REPORTLAB22, reportlab.__file__ import logging log = logging.getLogger("ho.pisa") try: import cStringIO as StringIO except: import StringIO try: import pyPdf except: pyPdf = None try: from reportlab.graphics import renderPM except: renderPM = None try: from reportlab.graphics import renderSVG except: renderSVG = None def ErrorMsg(): """ Helper to get a nice traceback as string """ import traceback, sys, cgi type = value = tb = limit = None type, value, tb = sys.exc_info() list = traceback.format_tb(tb, limit) + traceback.format_exception_only(type, value) return "Traceback (innermost last):\n" + "%-20s %s" % ( string.join(list[: - 1], ""), list[ - 1]) def toList(value): if type(value) not in (types.ListType, types.TupleType): return [value] return list(value) def flatten(x): """flatten(sequence) -> list copied from http://kogs-www.informatik.uni-hamburg.de/~meine/python_tricks Returns a single, flat list which contains all elements retrieved from the sequence and all recursively contained sub-sequences (iterables). 
Examples: >>> [1, 2, [3,4], (5,6)] [1, 2, [3, 4], (5, 6)] >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)]) [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]""" result = [] for el in x: #if isinstance(el, (list, tuple)): if hasattr(el, "__iter__") and not isinstance(el, basestring): result.extend(flatten(el)) else: result.append(el) return result def _toColor(arg, default=None): '''try to map an arbitrary arg to a color instance''' if isinstance(arg, Color): return arg tArg = type(arg) if tArg in (types.ListType, types.TupleType): assert 3 <= len(arg) <= 4, 'Can only convert 3 and 4 sequences to color' assert 0 <= min(arg) and max(arg) <= 1 return len(arg) == 3 and Color(arg[0], arg[1], arg[2]) or CMYKColor(arg[0], arg[1], arg[2], arg[3]) elif tArg == types.StringType: C = getAllNamedColors() s = arg.lower() if C.has_key(s): return C[s] try: return toColor(eval(arg)) except: pass try: return HexColor(arg) except: if default is None: raise ValueError('Invalid color value %r' % arg) return default def getColor(value, default=None): " Convert to color value " try: original = value if isinstance(value, Color): return value value = str(value).strip().lower() if value == "transparent" or value == "none": return default if value in COLOR_BY_NAME: return COLOR_BY_NAME[value] if value.startswith("#") and len(value) == 4: value = "#" + value[1] + value[1] + value[2] + value[2] + value[3] + value[3] elif rgb_re.search(value): # e.g., value = "<css function: rgb(153, 51, 153)>", go figure: r, g, b = [int(x) for x in rgb_re.search(value).groups()] value = "#%02x%02x%02x" % (r, g, b) else: # Shrug pass # XXX Throws illegal in 2.1 e.g. toColor('none'), # therefore we have a workaround here return _toColor(value) except ValueError, e: log.warn("Unknown color %r", original) return default def getBorderStyle(value, default=None): # log.debug(value) if value and (str(value).lower() not in ("none", "hidden")): return value return default mm = cm / 10.0 dpi96 = (1.0 / 96.0 * inch) _absoluteSizeTable = { "1": 50.0 / 100.0, "xx-small": 50.0 / 100.0, "x-small": 50.0 / 100.0, "2": 75.0 / 100.0, "small": 75.0 / 100.0, "3": 100.0 / 100.0, "medium": 100.0 / 100.0, "4": 125.0 / 100.0, "large": 125.0 / 100.0, "5": 150.0 / 100.0, "x-large": 150.0 / 100.0, "6": 175.0 / 100.0, "xx-large": 175.0 / 100.0, "7": 200.0 / 100.0, "xxx-large": 200.0 / 100.0, #"xx-small" : 3./5., #"x-small": 3./4., #"small": 8./9., #"medium": 1./1., #"large": 6./5., #"x-large": 3./2., #"xx-large": 2./1., #"xxx-large": 3./1., } _relativeSizeTable = { "larger": 1.25, "smaller": 0.75, "+4": 200.0 / 100.0, "+3": 175.0 / 100.0, "+2": 150.0 / 100.0, "+1": 125.0 / 100.0, "-1": 75.0 / 100.0, "-2": 50.0 / 100.0, "-3": 25.0 / 100.0, } MIN_FONT_SIZE = 1.0 def getSize(value, relative=0, base=None, default=0.0): """ Converts strings to standard sizes """ try: original = value if value is None: return relative elif type(value) is types.FloatType: return value elif type(value) is types.IntType: return float(value) elif type(value) in (types.TupleType, types.ListType): value = "".join(value) value = str(value).strip().lower().replace(",", ".") if value[ - 2:] == 'cm': return float(value[: - 2].strip()) * cm elif value[ - 2:] == 'mm': return (float(value[: - 2].strip()) * mm) # 1mm = 0.1cm elif value[ - 2:] == 'in': return float(value[: - 2].strip()) * inch # 1pt == 1/72inch elif value[ - 2:] == 'inch': return float(value[: - 4].strip()) * inch # 1pt == 1/72inch elif value[ - 2:] == 'pt': return float(value[: - 2].strip()) elif value[ - 2:] == 'pc': 
return float(value[: - 2].strip()) * 12.0 # 1pc == 12pt elif value[ - 2:] == 'px': return float(value[: - 2].strip()) * dpi96 # XXX W3C says, use 96pdi http://www.w3.org/TR/CSS21/syndata.html#length-units elif value[ - 1:] == 'i': # 1pt == 1/72inch return float(value[: - 1].strip()) * inch elif value in ("none", "0", "auto"): return 0.0 elif relative: if value[ - 2:] == 'em': # XXX return (float(value[: - 2].strip()) * relative) # 1em = 1 * fontSize elif value[ - 2:] == 'ex': # XXX return (float(value[: - 2].strip()) * (relative / 2.0)) # 1ex = 1/2 fontSize elif value[ - 1:] == '%': # print "%", value, relative, (relative * float(value[:-1].strip())) / 100.0 return (relative * float(value[: - 1].strip())) / 100.0 # 1% = (fontSize * 1) / 100 elif value in ("normal", "inherit"): return relative elif _relativeSizeTable.has_key(value): if base: return max(MIN_FONT_SIZE, base * _relativeSizeTable[value]) return max(MIN_FONT_SIZE, relative * _relativeSizeTable[value]) elif _absoluteSizeTable.has_key(value): if base: return max(MIN_FONT_SIZE, base * _absoluteSizeTable[value]) return max(MIN_FONT_SIZE, relative * _absoluteSizeTable[value]) try: value = float(value) except: log.warn("getSize: Not a float %r", value) return default #value = 0 return max(0, value) except Exception: log.warn("getSize %r %r", original, relative, exc_info=1) # print "ERROR getSize", repr(value), repr(value), e return default def getCoords(x, y, w, h, pagesize): """ As a stupid programmer I like to use the upper left corner of the document as the 0,0 coords therefore we need to do some fancy calculations """ #~ print pagesize ax, ay = pagesize if x < 0: x = ax + x if y < 0: y = ay + y if w != None and h != None: if w <= 0: w = (ax - x + w) if h <= 0: h = (ay - y + h) return x, (ay - y - h), w, h return x, (ay - y) def getBox(box, pagesize): """ Parse sizes by corners in the form: <X-Left> <Y-Upper> <Width> <Height> The last to values with negative values are interpreted as offsets form the right and lower border. """ box = str(box).split() if len(box) != 4: raise Exception, "box not defined right way" x, y, w, h = map(getSize, box) return getCoords(x, y, w, h, pagesize) def getPos(position, pagesize): """ Pair of coordinates """ position = str(position).split() if len(position) != 2: raise Exception, "position not defined right way" x, y = map(getSize, position) return getCoords(x, y, None, None, pagesize) def getBool(s): " Is it a boolean? " return str(s).lower() in ("y", "yes", "1", "true") _uid = 0 def getUID(): " Unique ID " global _uid _uid += 1 return str(_uid) _alignments = { "left": TA_LEFT, "center": TA_CENTER, "middle": TA_CENTER, "right": TA_RIGHT, "justify": TA_JUSTIFY, } def getAlign(value, default=TA_LEFT): return _alignments.get(str(value).lower(), default) #def getVAlign(value): # # Unused # return str(value).upper() GAE = "google.appengine" in sys.modules if GAE: STRATEGIES = ( StringIO.StringIO, StringIO.StringIO) else: STRATEGIES = ( StringIO.StringIO, tempfile.NamedTemporaryFile) class pisaTempFile(object): """A temporary file implementation that uses memory unless either capacity is breached or fileno is requested, at which point a real temporary file will be created and the relevant details returned If capacity is -1 the second strategy will never be used. Inspired by: http://code.activestate.com/recipes/496744/ """ STRATEGIES = STRATEGIES CAPACITY = 10 * 1024 def __init__(self, buffer="", capacity=CAPACITY): """Creates a TempFile object containing the specified buffer. 
If capacity is specified, we use a real temporary file once the file gets larger than that size. Otherwise, the data is stored in memory. """ #if hasattr(buffer, "read"): #shutil.copyfileobj( fsrc, fdst[, length]) self.capacity = capacity self.strategy = int(len(buffer) > self.capacity) try: self._delegate = self.STRATEGIES[self.strategy]() except: # Fallback for Google AppEnginge etc. self._delegate = self.STRATEGIES[0]() self.write(buffer) def makeTempFile(self): " Switch to next startegy. If an error occured stay with the first strategy " if self.strategy == 0: try: new_delegate = self.STRATEGIES[1]() new_delegate.write(self.getvalue()) self._delegate = new_delegate self.strategy = 1 log.warn("Created temporary file %s", self.name) except: self.capacity = - 1 def getFileName(self): " Get a named temporary file " self.makeTempFile() return self.name def fileno(self): """Forces this buffer to use a temporary file as the underlying. object and returns the fileno associated with it. """ self.makeTempFile() return self._delegate.fileno() def getvalue(self): " Get value of file. Work around for second strategy " if self.strategy == 0: return self._delegate.getvalue() self._delegate.flush() self._delegate.seek(0) return self._delegate.read() def write(self, value): " If capacity != -1 and length of file > capacity it is time to switch " if self.capacity > 0 and self.strategy == 0: len_value = len(value) if len_value >= self.capacity: needs_new_strategy = True else: self.seek(0, 2) # find end of file needs_new_strategy = \ (self.tell() + len_value) >= self.capacity if needs_new_strategy: self.makeTempFile() self._delegate.write(value) def __getattr__(self, name): try: <|fim▁hole|> # hide the delegation e = "object '%s' has no attribute '%s'" \ % (self.__class__.__name__, name) raise AttributeError(e) _rx_datauri = re.compile("^data:(?P<mime>[a-z]+/[a-z]+);base64,(?P<data>.*)$", re.M | re.DOTALL) class pisaFileObject: """ XXX """ def __init__(self, uri, basepath=None): self.basepath = basepath self.mimetype = None self.file = None self.data = None self.uri = None self.local = None self.tmp_file = None uri = str(uri) log.debug("FileObject %r, Basepath: %r", uri, basepath) # Data URI if uri.startswith("data:"): m = _rx_datauri.match(uri) self.mimetype = m.group("mime") self.data = base64.decodestring(m.group("data")) else: # Check if we have an external scheme if basepath and not (uri.startswith("http://") or uri.startswith("https://")): urlParts = urlparse.urlparse(basepath) else: urlParts = urlparse.urlparse(uri) log.debug("URLParts: %r", urlParts) # Drive letters have len==1 but we are looking for things like http: if len(urlParts[0]) > 1 : # External data if basepath: uri = urlparse.urljoin(basepath, uri) #path = urlparse.urlsplit(url)[2] #mimetype = getMimeType(path) # Using HTTPLIB server, path = urllib.splithost(uri[uri.find("//"):]) if uri.startswith("https://"): conn = httplib.HTTPSConnection(server) else: conn = httplib.HTTPConnection(server) conn.request("GET", path) r1 = conn.getresponse() # log.debug("HTTP %r %r %r %r", server, path, uri, r1) if (r1.status, r1.reason) == (200, "OK"): # data = r1.read() self.mimetype = r1.getheader("Content-Type", None).split(";")[0] self.uri = uri if r1.getheader("content-encoding") == "gzip": # zbuf = cStringIO.StringIO(data) import gzip self.file = gzip.GzipFile(mode="rb", fileobj=r1) #data = zfile.read() #zfile.close() else: self.file = r1 # self.file = urlResponse else: urlResponse = urllib2.urlopen(uri) self.mimetype = 
urlResponse.info().get("Content-Type", None).split(";")[0] self.uri = urlResponse.geturl() self.file = urlResponse else: # Local data if basepath: uri = os.path.normpath(os.path.join(basepath, uri)) if os.path.isfile(uri): self.uri = uri self.local = uri self.setMimeTypeByName(uri) self.file = open(uri, "rb") def getFile(self): if self.file is not None: return self.file if self.data is not None: return pisaTempFile(self.data) return None def getNamedFile(self): if self.notFound(): return None if self.local: return str(self.local) if not self.tmp_file: self.tmp_file = tempfile.NamedTemporaryFile() if self.file: shutil.copyfileobj(self.file, self.tmp_file) else: self.tmp_file.write(self.getData()) self.tmp_file.flush() return self.tmp_file.name def getData(self): if self.data is not None: return self.data if self.file is not None: self.data = self.file.read() return self.data return None def notFound(self): return (self.file is None) and (self.data is None) def setMimeTypeByName(self, name): " Guess the mime type " mimetype = mimetypes.guess_type(name)[0] if mimetype is not None: self.mimetype = mimetypes.guess_type(name)[0].split(";")[0] def getFile(*a , **kw): file = pisaFileObject(*a, **kw) if file.notFound(): return None return file COLOR_BY_NAME = { 'activeborder': Color(212, 208, 200), 'activecaption': Color(10, 36, 106), 'aliceblue': Color(.941176, .972549, 1), 'antiquewhite': Color(.980392, .921569, .843137), 'appworkspace': Color(128, 128, 128), 'aqua': Color(0, 1, 1), 'aquamarine': Color(.498039, 1, .831373), 'azure': Color(.941176, 1, 1), 'background': Color(58, 110, 165), 'beige': Color(.960784, .960784, .862745), 'bisque': Color(1, .894118, .768627), 'black': Color(0, 0, 0), 'blanchedalmond': Color(1, .921569, .803922), 'blue': Color(0, 0, 1), 'blueviolet': Color(.541176, .168627, .886275), 'brown': Color(.647059, .164706, .164706), 'burlywood': Color(.870588, .721569, .529412), 'buttonface': Color(212, 208, 200), 'buttonhighlight': Color(255, 255, 255), 'buttonshadow': Color(128, 128, 128), 'buttontext': Color(0, 0, 0), 'cadetblue': Color(.372549, .619608, .627451), 'captiontext': Color(255, 255, 255), 'chartreuse': Color(.498039, 1, 0), 'chocolate': Color(.823529, .411765, .117647), 'coral': Color(1, .498039, .313725), 'cornflowerblue': Color(.392157, .584314, .929412), 'cornsilk': Color(1, .972549, .862745), 'crimson': Color(.862745, .078431, .235294), 'cyan': Color(0, 1, 1), 'darkblue': Color(0, 0, .545098), 'darkcyan': Color(0, .545098, .545098), 'darkgoldenrod': Color(.721569, .52549, .043137), 'darkgray': Color(.662745, .662745, .662745), 'darkgreen': Color(0, .392157, 0), 'darkgrey': Color(.662745, .662745, .662745), 'darkkhaki': Color(.741176, .717647, .419608), 'darkmagenta': Color(.545098, 0, .545098), 'darkolivegreen': Color(.333333, .419608, .184314), 'darkorange': Color(1, .54902, 0), 'darkorchid': Color(.6, .196078, .8), 'darkred': Color(.545098, 0, 0), 'darksalmon': Color(.913725, .588235, .478431), 'darkseagreen': Color(.560784, .737255, .560784), 'darkslateblue': Color(.282353, .239216, .545098), 'darkslategray': Color(.184314, .309804, .309804), 'darkslategrey': Color(.184314, .309804, .309804), 'darkturquoise': Color(0, .807843, .819608), 'darkviolet': Color(.580392, 0, .827451), 'deeppink': Color(1, .078431, .576471), 'deepskyblue': Color(0, .74902, 1), 'dimgray': Color(.411765, .411765, .411765), 'dimgrey': Color(.411765, .411765, .411765), 'dodgerblue': Color(.117647, .564706, 1), 'firebrick': Color(.698039, .133333, .133333), 'floralwhite': Color(1, 
.980392, .941176), 'forestgreen': Color(.133333, .545098, .133333), 'fuchsia': Color(1, 0, 1), 'gainsboro': Color(.862745, .862745, .862745), 'ghostwhite': Color(.972549, .972549, 1), 'gold': Color(1, .843137, 0), 'goldenrod': Color(.854902, .647059, .12549), 'gray': Color(.501961, .501961, .501961), 'graytext': Color(128, 128, 128), 'green': Color(0, .501961, 0), 'greenyellow': Color(.678431, 1, .184314), 'grey': Color(.501961, .501961, .501961), 'highlight': Color(10, 36, 106), 'highlighttext': Color(255, 255, 255), 'honeydew': Color(.941176, 1, .941176), 'hotpink': Color(1, .411765, .705882), 'inactiveborder': Color(212, 208, 200), 'inactivecaption': Color(128, 128, 128), 'inactivecaptiontext': Color(212, 208, 200), 'indianred': Color(.803922, .360784, .360784), 'indigo': Color(.294118, 0, .509804), 'infobackground': Color(255, 255, 225), 'infotext': Color(0, 0, 0), 'ivory': Color(1, 1, .941176), 'khaki': Color(.941176, .901961, .54902), 'lavender': Color(.901961, .901961, .980392), 'lavenderblush': Color(1, .941176, .960784), 'lawngreen': Color(.486275, .988235, 0), 'lemonchiffon': Color(1, .980392, .803922), 'lightblue': Color(.678431, .847059, .901961), 'lightcoral': Color(.941176, .501961, .501961), 'lightcyan': Color(.878431, 1, 1), 'lightgoldenrodyellow': Color(.980392, .980392, .823529), 'lightgray': Color(.827451, .827451, .827451), 'lightgreen': Color(.564706, .933333, .564706), 'lightgrey': Color(.827451, .827451, .827451), 'lightpink': Color(1, .713725, .756863), 'lightsalmon': Color(1, .627451, .478431), 'lightseagreen': Color(.12549, .698039, .666667), 'lightskyblue': Color(.529412, .807843, .980392), 'lightslategray': Color(.466667, .533333, .6), 'lightslategrey': Color(.466667, .533333, .6), 'lightsteelblue': Color(.690196, .768627, .870588), 'lightyellow': Color(1, 1, .878431), 'lime': Color(0, 1, 0), 'limegreen': Color(.196078, .803922, .196078), 'linen': Color(.980392, .941176, .901961), 'magenta': Color(1, 0, 1), 'maroon': Color(.501961, 0, 0), 'mediumaquamarine': Color(.4, .803922, .666667), 'mediumblue': Color(0, 0, .803922), 'mediumorchid': Color(.729412, .333333, .827451), 'mediumpurple': Color(.576471, .439216, .858824), 'mediumseagreen': Color(.235294, .701961, .443137), 'mediumslateblue': Color(.482353, .407843, .933333), 'mediumspringgreen': Color(0, .980392, .603922), 'mediumturquoise': Color(.282353, .819608, .8), 'mediumvioletred': Color(.780392, .082353, .521569), 'menu': Color(212, 208, 200), 'menutext': Color(0, 0, 0), 'midnightblue': Color(.098039, .098039, .439216), 'mintcream': Color(.960784, 1, .980392), 'mistyrose': Color(1, .894118, .882353), 'moccasin': Color(1, .894118, .709804), 'navajowhite': Color(1, .870588, .678431), 'navy': Color(0, 0, .501961), 'oldlace': Color(.992157, .960784, .901961), 'olive': Color(.501961, .501961, 0), 'olivedrab': Color(.419608, .556863, .137255), 'orange': Color(1, .647059, 0), 'orangered': Color(1, .270588, 0), 'orchid': Color(.854902, .439216, .839216), 'palegoldenrod': Color(.933333, .909804, .666667), 'palegreen': Color(.596078, .984314, .596078), 'paleturquoise': Color(.686275, .933333, .933333), 'palevioletred': Color(.858824, .439216, .576471), 'papayawhip': Color(1, .937255, .835294), 'peachpuff': Color(1, .854902, .72549), 'peru': Color(.803922, .521569, .247059), 'pink': Color(1, .752941, .796078), 'plum': Color(.866667, .627451, .866667), 'powderblue': Color(.690196, .878431, .901961), 'purple': Color(.501961, 0, .501961), 'red': Color(1, 0, 0), 'rosybrown': Color(.737255, .560784, .560784), 'royalblue': 
Color(.254902, .411765, .882353), 'saddlebrown': Color(.545098, .270588, .07451), 'salmon': Color(.980392, .501961, .447059), 'sandybrown': Color(.956863, .643137, .376471), 'scrollbar': Color(212, 208, 200), 'seagreen': Color(.180392, .545098, .341176), 'seashell': Color(1, .960784, .933333), 'sienna': Color(.627451, .321569, .176471), 'silver': Color(.752941, .752941, .752941), 'skyblue': Color(.529412, .807843, .921569), 'slateblue': Color(.415686, .352941, .803922), 'slategray': Color(.439216, .501961, .564706), 'slategrey': Color(.439216, .501961, .564706), 'snow': Color(1, .980392, .980392), 'springgreen': Color(0, 1, .498039), 'steelblue': Color(.27451, .509804, .705882), 'tan': Color(.823529, .705882, .54902), 'teal': Color(0, .501961, .501961), 'thistle': Color(.847059, .74902, .847059), 'threeddarkshadow': Color(64, 64, 64), 'threedface': Color(212, 208, 200), 'threedhighlight': Color(255, 255, 255), 'threedlightshadow': Color(212, 208, 200), 'threedshadow': Color(128, 128, 128), 'tomato': Color(1, .388235, .278431), 'turquoise': Color(.25098, .878431, .815686), 'violet': Color(.933333, .509804, .933333), 'wheat': Color(.960784, .870588, .701961), 'white': Color(1, 1, 1), 'whitesmoke': Color(.960784, .960784, .960784), 'window': Color(255, 255, 255), 'windowframe': Color(0, 0, 0), 'windowtext': Color(0, 0, 0), 'yellow': Color(1, 1, 0), 'yellowgreen': Color(.603922, .803922, .196078)}<|fim▁end|>
return getattr(self._delegate, name) except AttributeError:
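# A minimal sketch of the unit and colour helpers defined in pisa_util.py
# above, assuming they can be imported from that module (the import path is an
# assumption, not part of the corpus):
from pisa_util import getSize, getColor, getBool

assert getSize("10pt") == 10.0               # points pass through unchanged
assert getSize("1in") == getSize("72pt")     # 1 inch == 72 points
assert getSize("50%", relative=12.0) == 6.0  # percentages resolve against 'relative'
red = getColor("rgb(255, 0, 0)")             # CSS rgb() strings are normalised, then parsed as hex
assert getBool("Yes") and not getBool("off")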
<|file_name|>flow.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ //! Servo's experimental layout system builds a tree of `Flow` and `Fragment` objects and solves //! layout constraints to obtain positions and display attributes of tree nodes. Positions are //! computed in several tree traversals driven by the fundamental data dependencies required by //! inline and block layout. //! //! Flows are interior nodes in the layout tree and correspond closely to *flow contexts* in the //! CSS specification. Flows are responsible for positioning their child flow contexts and //! fragments. Flows have purpose-specific fields, such as auxiliary line structs, out-of-flow //! child lists, and so on. //! //! Currently, the important types of flows are: //! //! * `BlockFlow`: A flow that establishes a block context. It has several child flows, each of //! which are positioned according to block formatting context rules (CSS block boxes). Block //! flows also contain a single box to represent their rendered borders, padding, etc. //! The BlockFlow at the root of the tree has special behavior: it stretches to the boundaries of //! the viewport. //! //! * `InlineFlow`: A flow that establishes an inline context. It has a flat list of child //! fragments/flows that are subject to inline layout and line breaking and structs to represent //! line breaks and mapping to CSS boxes, for the purpose of handling `getClientRects()` and //! similar methods. use crate::block::{BlockFlow, FormattingContextType}; use crate::context::LayoutContext; use crate::display_list::items::ClippingAndScrolling; use crate::display_list::{DisplayListBuildState, StackingContextCollectionState}; use crate::flex::FlexFlow; use crate::floats::{Floats, SpeculatedFloatPlacement}; use crate::flow_list::{FlowList, FlowListIterator, MutFlowListIterator}; use crate::flow_ref::{FlowRef, WeakFlowRef}; use crate::fragment::{CoordinateSystem, Fragment, FragmentBorderBoxIterator, Overflow}; use crate::inline::InlineFlow; use crate::model::{CollapsibleMargins, IntrinsicISizes, MarginCollapseInfo}; use crate::multicol::MulticolFlow; use crate::parallel::FlowParallelInfo; use crate::table::TableFlow; use crate::table_caption::TableCaptionFlow; use crate::table_cell::TableCellFlow; use crate::table_colgroup::TableColGroupFlow; use crate::table_row::TableRowFlow; use crate::table_rowgroup::TableRowGroupFlow; use crate::table_wrapper::TableWrapperFlow; use app_units::Au; use euclid::{Point2D, Rect, Size2D, Vector2D}; use gfx_traits::print_tree::PrintTree; use gfx_traits::StackingContextId; use num_traits::cast::FromPrimitive; use serde::ser::{Serialize, SerializeStruct, Serializer}; use servo_geometry::{au_rect_to_f32_rect, f32_rect_to_au_rect, MaxRect}; use std::fmt; use std::iter::Zip; use std::slice::IterMut; use std::sync::atomic::Ordering; use std::sync::Arc; use style::computed_values::clear::T as Clear; use style::computed_values::float::T as Float; use style::computed_values::overflow_x::T as StyleOverflow; use style::computed_values::position::T as Position; use style::computed_values::text_align::T as TextAlign; use style::context::SharedStyleContext; use style::logical_geometry::{LogicalRect, LogicalSize, WritingMode}; use style::properties::ComputedValues; use style::selector_parser::RestyleDamage; use style::servo::restyle_damage::ServoRestyleDamage; use 
style::values::computed::LengthPercentageOrAuto; use webrender_api::LayoutTransform; /// This marker trait indicates that a type is a struct with `#[repr(C)]` whose first field /// is of type `BaseFlow` or some type that also implements this trait. /// /// In other words, the memory representation of `BaseFlow` must be a prefix /// of the memory representation of types implementing `HasBaseFlow`. #[allow(unsafe_code)] pub unsafe trait HasBaseFlow {} /// Methods to get the `BaseFlow` from any `HasBaseFlow` type. pub trait GetBaseFlow { fn base(&self) -> &BaseFlow; fn mut_base(&mut self) -> &mut BaseFlow; } impl<T: HasBaseFlow + ?Sized> GetBaseFlow for T { #[inline(always)] #[allow(unsafe_code)] fn base(&self) -> &BaseFlow { let ptr: *const Self = self; let ptr = ptr as *const BaseFlow; unsafe { &*ptr } } #[inline(always)] #[allow(unsafe_code)] fn mut_base(&mut self) -> &mut BaseFlow { let ptr: *mut Self = self; let ptr = ptr as *mut BaseFlow; unsafe { &mut *ptr } } } /// Virtual methods that make up a float context. /// /// Note that virtual methods have a cost; we should not overuse them in Servo. Consider adding /// methods to `ImmutableFlowUtils` or `MutableFlowUtils` before adding more methods here. pub trait Flow: HasBaseFlow + fmt::Debug + Sync + Send + 'static { // RTTI // // TODO(pcwalton): Use Rust's RTTI, once that works. /// Returns the class of flow that this is. fn class(&self) -> FlowClass; /// If this is a block flow, returns the underlying object. Fails otherwise. fn as_block(&self) -> &BlockFlow { panic!("called as_block() on a non-block flow") } /// If this is a block flow, returns the underlying object, borrowed mutably. Fails otherwise. fn as_mut_block(&mut self) -> &mut BlockFlow { debug!("called as_mut_block() on a flow of type {:?}", self.class()); panic!("called as_mut_block() on a non-block flow") } /// If this is a flex flow, returns the underlying object. Fails otherwise. fn as_flex(&self) -> &FlexFlow { panic!("called as_flex() on a non-flex flow") } /// If this is a flex flow, returns the underlying object, borrowed mutably. Fails otherwise. fn as_mut_flex(&mut self) -> &mut FlexFlow { panic!("called as_mut_flex() on a non-flex flow") } /// If this is an inline flow, returns the underlying object. Fails otherwise. fn as_inline(&self) -> &InlineFlow { panic!("called as_inline() on a non-inline flow") } /// If this is an inline flow, returns the underlying object, borrowed mutably. Fails /// otherwise. fn as_mut_inline(&mut self) -> &mut InlineFlow { panic!("called as_mut_inline() on a non-inline flow") } /// If this is a table wrapper flow, returns the underlying object, borrowed mutably. Fails /// otherwise. fn as_mut_table_wrapper(&mut self) -> &mut TableWrapperFlow { panic!("called as_mut_table_wrapper() on a non-tablewrapper flow") } /// If this is a table wrapper flow, returns the underlying object. Fails otherwise. fn as_table_wrapper(&self) -> &TableWrapperFlow { panic!("called as_table_wrapper() on a non-tablewrapper flow") } /// If this is a table flow, returns the underlying object, borrowed mutably. Fails otherwise. fn as_mut_table(&mut self) -> &mut TableFlow { panic!("called as_mut_table() on a non-table flow") } /// If this is a table flow, returns the underlying object. Fails otherwise. fn as_table(&self) -> &TableFlow { panic!("called as_table() on a non-table flow") } /// If this is a table colgroup flow, returns the underlying object, borrowed mutably. Fails /// otherwise. 
fn as_mut_table_colgroup(&mut self) -> &mut TableColGroupFlow { panic!("called as_mut_table_colgroup() on a non-tablecolgroup flow") } /// If this is a table colgroup flow, returns the underlying object. Fails /// otherwise. fn as_table_colgroup(&self) -> &TableColGroupFlow { panic!("called as_table_colgroup() on a non-tablecolgroup flow") } /// If this is a table rowgroup flow, returns the underlying object, borrowed mutably. Fails /// otherwise. fn as_mut_table_rowgroup(&mut self) -> &mut TableRowGroupFlow { panic!("called as_mut_table_rowgroup() on a non-tablerowgroup flow") } /// If this is a table rowgroup flow, returns the underlying object. Fails otherwise. fn as_table_rowgroup(&self) -> &TableRowGroupFlow { panic!("called as_table_rowgroup() on a non-tablerowgroup flow") } /// If this is a table row flow, returns the underlying object, borrowed mutably. Fails /// otherwise. fn as_mut_table_row(&mut self) -> &mut TableRowFlow { panic!("called as_mut_table_row() on a non-tablerow flow") } /// If this is a table row flow, returns the underlying object. Fails otherwise. fn as_table_row(&self) -> &TableRowFlow { panic!("called as_table_row() on a non-tablerow flow") } /// If this is a table cell flow, returns the underlying object, borrowed mutably. Fails /// otherwise. fn as_mut_table_caption(&mut self) -> &mut TableCaptionFlow { panic!("called as_mut_table_caption() on a non-tablecaption flow") } /// If this is a table cell flow, returns the underlying object, borrowed mutably. Fails /// otherwise. fn as_mut_table_cell(&mut self) -> &mut TableCellFlow { panic!("called as_mut_table_cell() on a non-tablecell flow") } /// If this is a multicol flow, returns the underlying object, borrowed mutably. Fails /// otherwise. fn as_mut_multicol(&mut self) -> &mut MulticolFlow { panic!("called as_mut_multicol() on a non-multicol flow") } /// If this is a table cell flow, returns the underlying object. Fails otherwise. fn as_table_cell(&self) -> &TableCellFlow { panic!("called as_table_cell() on a non-tablecell flow") } // Main methods /// Pass 1 of reflow: computes minimum and preferred inline-sizes. /// /// Recursively (bottom-up) determine the flow's minimum and preferred inline-sizes. When /// called on this flow, all child flows have had their minimum and preferred inline-sizes set. /// This function must decide minimum/preferred inline-sizes based on its children's inline- /// sizes and the dimensions of any boxes it is responsible for flowing. fn bubble_inline_sizes(&mut self) { panic!("bubble_inline_sizes not yet implemented") } /// Pass 2 of reflow: computes inline-size. fn assign_inline_sizes(&mut self, _ctx: &LayoutContext) { panic!("assign_inline_sizes not yet implemented") } /// Pass 3a of reflow: computes block-size. fn assign_block_size(&mut self, _ctx: &LayoutContext) { panic!("assign_block_size not yet implemented") } /// Like `assign_block_size`, but is recurses explicitly into descendants. /// Fit as much content as possible within `available_block_size`. /// If that’s not all of it, truncate the contents of `self` /// and return a new flow similar to `self` with the rest of the content. /// /// The default is to make a flow "atomic": it can not be fragmented. 
fn fragment( &mut self, layout_context: &LayoutContext, _fragmentation_context: Option<FragmentationContext>, ) -> Option<Arc<dyn Flow>> { fn recursive_assign_block_size<F: ?Sized + Flow + GetBaseFlow>( flow: &mut F, ctx: &LayoutContext, ) { for child in flow.mut_base().child_iter_mut() { recursive_assign_block_size(child, ctx) } flow.assign_block_size(ctx); } recursive_assign_block_size(self, layout_context); None } fn collect_stacking_contexts(&mut self, state: &mut StackingContextCollectionState); /// If this is a float, places it. The default implementation does nothing. fn place_float_if_applicable<'a>(&mut self) {} /// Assigns block-sizes in-order; or, if this is a float, places the float. The default /// implementation simply assigns block-sizes if this flow might have floats in. Returns true /// if it was determined that this child might have had floats in or false otherwise. /// /// `parent_thread_id` is the thread ID of the parent. This is used for the layout tinting /// debug mode; if the block size of this flow was determined by its parent, we should treat /// it as laid out by its parent. fn assign_block_size_for_inorder_child_if_necessary( &mut self, layout_context: &LayoutContext, parent_thread_id: u8, _content_box: LogicalRect<Au>, ) -> bool { let might_have_floats_in_or_out = self.base().might_have_floats_in() || self.base().might_have_floats_out(); if might_have_floats_in_or_out { self.mut_base().thread_id = parent_thread_id; self.assign_block_size(layout_context); self.mut_base() .restyle_damage .remove(ServoRestyleDamage::REFLOW_OUT_OF_FLOW | ServoRestyleDamage::REFLOW); } might_have_floats_in_or_out } fn get_overflow_in_parent_coordinates(&self) -> Overflow { // FIXME(#2795): Get the real container size. let container_size = Size2D::zero(); let position = self .base() .position .to_physical(self.base().writing_mode, container_size); let mut overflow = self.base().overflow; match self.class() { FlowClass::Block | FlowClass::TableCaption | FlowClass::TableCell => {}, _ => { overflow.translate(&position.origin.to_vector()); return overflow; }, } let border_box = self.as_block().fragment.stacking_relative_border_box( &self.base().stacking_relative_position, &self .base() .early_absolute_position_info .relative_containing_block_size, self.base() .early_absolute_position_info .relative_containing_block_mode, CoordinateSystem::Own, ); if StyleOverflow::Visible != self.as_block().fragment.style.get_box().overflow_x { overflow.paint.origin.x = Au(0); overflow.paint.size.width = border_box.size.width; overflow.scroll.origin.x = Au(0); overflow.scroll.size.width = border_box.size.width; } if StyleOverflow::Visible != self.as_block().fragment.style.get_box().overflow_y { overflow.paint.origin.y = Au(0); overflow.paint.size.height = border_box.size.height; overflow.scroll.origin.y = Au(0); overflow.scroll.size.height = border_box.size.height; } if !self.as_block().fragment.establishes_stacking_context() || self.as_block() .fragment .style .get_box() .transform .0 .is_empty() { overflow.translate(&position.origin.to_vector()); return overflow; } // TODO: Take into account 3d transforms, even though it's a fairly // uncommon case. 
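        // Until then, flatten the transform to 2D, run both the paint and
        // scroll overflow rects through it, and translate the result back
        // into the parent's coordinate space at the end of this method.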
let transform_2d = self .as_block() .fragment .transform_matrix(&position) .unwrap_or(LayoutTransform::identity()) .to_2d() .to_untyped(); let transformed_overflow = Overflow { paint: f32_rect_to_au_rect( transform_2d.transform_rect(&au_rect_to_f32_rect(overflow.paint)), ), scroll: f32_rect_to_au_rect( transform_2d.transform_rect(&au_rect_to_f32_rect(overflow.scroll)), ), }; // TODO: We are taking the union of the overflow and transformed overflow here, which // happened implicitly in the previous version of this code. This will probably be // unnecessary once we are taking into account 3D transformations above. overflow.union(&transformed_overflow); overflow.translate(&position.origin.to_vector()); overflow } /// /// CSS Section 11.1 /// This is the union of rectangles of the flows for which we define the /// Containing Block. /// /// FIXME(pcwalton): This should not be a virtual method, but currently is due to a compiler /// bug ("the trait `Sized` is not implemented for `self`"). /// /// Assumption: This is called in a bottom-up traversal, so kids' overflows have /// already been set. /// Assumption: Absolute descendants have had their overflow calculated. fn store_overflow(&mut self, _: &LayoutContext) { // Calculate overflow on a per-fragment basis. let mut overflow = self.compute_overflow(); match self.class() { FlowClass::Block | FlowClass::TableCaption | FlowClass::TableCell => { for kid in self.mut_base().children.iter_mut() { overflow.union(&kid.get_overflow_in_parent_coordinates()); } }, _ => {}, } self.mut_base().overflow = overflow } /// Phase 4 of reflow: Compute the stacking-relative position (origin of the content box, /// in coordinates relative to the nearest ancestor stacking context). fn compute_stacking_relative_position(&mut self, _: &LayoutContext) { // The default implementation is a no-op. } /// Phase 5 of reflow: builds display lists. fn build_display_list(&mut self, state: &mut DisplayListBuildState); /// Returns the union of all overflow rects of all of this flow's fragments. fn compute_overflow(&self) -> Overflow; /// Iterates through border boxes of all of this flow's fragments. /// Level provides a zero based index indicating the current /// depth of the flow tree during fragment iteration. fn iterate_through_fragment_border_boxes( &self, iterator: &mut dyn FragmentBorderBoxIterator, level: i32, stacking_context_position: &Point2D<Au>, ); /// Mutably iterates through fragments in this flow. fn mutate_fragments(&mut self, mutator: &mut dyn FnMut(&mut Fragment)); fn compute_collapsible_block_start_margin( &mut self, _layout_context: &mut LayoutContext, _margin_collapse_info: &mut MarginCollapseInfo, ) { // The default implementation is a no-op. } /// Marks this flow as the root flow. The default implementation is a no-op. fn mark_as_root(&mut self) { debug!("called mark_as_root() on a flow of type {:?}", self.class()); panic!("called mark_as_root() on an unhandled flow"); } // Note that the following functions are mostly called using static method // dispatch, so it's ok to have them in this trait. Plus, they have // different behaviour for different types of Flow, so they can't go into // the Immutable / Mutable Flow Utils traits without additional casts. fn is_root(&self) -> bool { false } /// The 'position' property of this flow. fn positioning(&self) -> Position { Position::Static } /// Return true if this flow has position 'fixed'. 
fn is_fixed(&self) -> bool { self.positioning() == Position::Fixed } fn contains_positioned_fragments(&self) -> bool { self.contains_relatively_positioned_fragments() || self.base() .flags .contains(FlowFlags::IS_ABSOLUTELY_POSITIONED) } fn contains_relatively_positioned_fragments(&self) -> bool { self.positioning() == Position::Relative } /// Returns true if this is an absolute containing block. fn is_absolute_containing_block(&self) -> bool { self.contains_positioned_fragments() } /// Returns true if this flow contains fragments that are roots of an absolute flow tree. fn contains_roots_of_absolute_flow_tree(&self) -> bool { self.contains_relatively_positioned_fragments() || self.is_root() } /// Updates the inline position of a child flow during the assign-height traversal. At present, /// this is only used for absolutely-positioned inline-blocks. fn update_late_computed_inline_position_if_necessary(&mut self, inline_position: Au); /// Updates the block position of a child flow during the assign-height traversal. At present, /// this is only used for absolutely-positioned inline-blocks. fn update_late_computed_block_position_if_necessary(&mut self, block_position: Au); /// Return the size of the containing block generated by this flow for the absolutely- /// positioned descendant referenced by `for_flow`. For block flows, this is the padding box. /// /// NB: Do not change this `&self` to `&mut self` under any circumstances! It has security /// implications because this can be called on parents concurrently from descendants! fn generated_containing_block_size(&self, _: OpaqueFlow) -> LogicalSize<Au>; /// Attempts to perform incremental fixup of this flow by replacing its fragment's style with /// the new style. This can only succeed if the flow has exactly one fragment. fn repair_style(&mut self, new_style: &crate::ServoArc<ComputedValues>); /// Print any extra children (such as fragments) contained in this Flow /// for debugging purposes. Any items inserted into the tree will become /// children of this flow. fn print_extra_flow_children(&self, _: &mut PrintTree) {} fn clipping_and_scrolling(&self) -> ClippingAndScrolling { match self.base().clipping_and_scrolling { Some(info) => info, None => unreachable!("Tried to access scroll root id on Flow before assignment"), } } } pub trait ImmutableFlowUtils { // Convenience functions /// Returns true if this flow is a block flow or subclass thereof. fn is_block_like(self) -> bool; /// Returns true if this flow is a table flow. fn is_table(self) -> bool; /// Returns true if this flow is a table caption flow. fn is_table_caption(self) -> bool; /// Returns true if this flow is a proper table child. fn is_proper_table_child(self) -> bool; /// Returns true if this flow is a table row flow. fn is_table_row(self) -> bool; /// Returns true if this flow is a table cell flow. fn is_table_cell(self) -> bool; /// Returns true if this flow is a table colgroup flow. fn is_table_colgroup(self) -> bool; /// Returns true if this flow is a table rowgroup flow. fn is_table_rowgroup(self) -> bool; /// Returns true if this flow is one of table-related flows. fn is_table_kind(self) -> bool; /// Returns true if this flow has no children. fn is_leaf(self) -> bool; /// Returns the number of children that this flow possesses. fn child_count(self) -> usize; /// Return true if this flow is a Block Container. fn is_block_container(self) -> bool; /// Returns true if this flow is a block flow. fn is_block_flow(self) -> bool; /// Returns true if this flow is an inline flow. 
fn is_inline_flow(self) -> bool; /// Dumps the flow tree for debugging. fn print(self, title: String); /// Dumps the flow tree for debugging into the given PrintTree. fn print_with_tree(self, print_tree: &mut PrintTree); /// Returns true if floats might flow through this flow, as determined by the float placement /// speculation pass. fn floats_might_flow_through(self) -> bool; fn baseline_offset_of_last_line_box_in_flow(self) -> Option<Au>; } pub trait MutableFlowUtils { /// Calls `repair_style` and `bubble_inline_sizes`. You should use this method instead of /// calling them individually, since there is no reason not to perform both operations. fn repair_style_and_bubble_inline_sizes(self, style: &crate::ServoArc<ComputedValues>); } pub trait MutableOwnedFlowUtils { /// Set absolute descendants for this flow. /// /// Set this flow as the Containing Block for all the absolute descendants. fn set_absolute_descendants(&mut self, abs_descendants: AbsoluteDescendants); /// Sets the flow as the containing block for all absolute descendants that have been marked /// as having reached their containing block. This is needed in order to handle cases like: /// /// ```html /// <div> /// <span style="position: relative"> /// <span style="position: absolute; ..."></span> /// </span> /// </div> /// ``` fn take_applicable_absolute_descendants( &mut self, absolute_descendants: &mut AbsoluteDescendants, ); } #[derive(Clone, Copy, Debug, PartialEq, Serialize)] pub enum FlowClass { Block, Inline, ListItem, TableWrapper, Table, TableColGroup, TableRowGroup, TableRow, TableCaption, TableCell, Multicol, MulticolColumn, Flex, } impl FlowClass { fn is_block_like(self) -> bool { match self { FlowClass::Block | FlowClass::ListItem | FlowClass::Table | FlowClass::TableRowGroup | FlowClass::TableRow | FlowClass::TableCaption | FlowClass::TableCell | FlowClass::TableWrapper | FlowClass::Flex => true, _ => false, } } } bitflags! { #[doc = "Flags used in flows."] pub struct FlowFlags: u32 { // text align flags #[doc = "Whether this flow is absolutely positioned. This is checked all over layout, so a"] #[doc = "virtual call is too expensive."] const IS_ABSOLUTELY_POSITIONED = 0b0000_0000_0000_0000_0100_0000; #[doc = "Whether this flow clears to the left. This is checked all over layout, so a"] #[doc = "virtual call is too expensive."] const CLEARS_LEFT = 0b0000_0000_0000_0000_1000_0000; #[doc = "Whether this flow clears to the right. This is checked all over layout, so a"] #[doc = "virtual call is too expensive."] const CLEARS_RIGHT = 0b0000_0000_0000_0001_0000_0000; #[doc = "Whether this flow is left-floated. This is checked all over layout, so a"] #[doc = "virtual call is too expensive."] const FLOATS_LEFT = 0b0000_0000_0000_0010_0000_0000; #[doc = "Whether this flow is right-floated. This is checked all over layout, so a"] #[doc = "virtual call is too expensive."] const FLOATS_RIGHT = 0b0000_0000_0000_0100_0000_0000; #[doc = "Text alignment. 
\ NB: If you update this, update `TEXT_ALIGN_SHIFT` below."] const TEXT_ALIGN = 0b0000_0000_0111_1000_0000_0000; #[doc = "Whether this flow has a fragment with `counter-reset` or `counter-increment` \ styles."] const AFFECTS_COUNTERS = 0b0000_0000_1000_0000_0000_0000; #[doc = "Whether this flow's descendants have fragments that affect `counter-reset` or \ `counter-increment` styles."] const HAS_COUNTER_AFFECTING_CHILDREN = 0b0000_0001_0000_0000_0000_0000; #[doc = "Whether this flow behaves as though it had `position: static` for the purposes \ of positioning in the inline direction. This is set for flows with `position: \ static` and `position: relative` as well as absolutely-positioned flows with \ unconstrained positions in the inline direction."] const INLINE_POSITION_IS_STATIC = 0b0000_0010_0000_0000_0000_0000; #[doc = "Whether this flow behaves as though it had `position: static` for the purposes \ of positioning in the block direction. This is set for flows with `position: \ static` and `position: relative` as well as absolutely-positioned flows with \ unconstrained positions in the block direction."] const BLOCK_POSITION_IS_STATIC = 0b0000_0100_0000_0000_0000_0000; /// Whether any ancestor is a fragmentation container const CAN_BE_FRAGMENTED = 0b0000_1000_0000_0000_0000_0000; /// Whether this flow contains any text and/or replaced fragments. const CONTAINS_TEXT_OR_REPLACED_FRAGMENTS = 0b0001_0000_0000_0000_0000_0000; /// Whether margins are prohibited from collapsing with this flow. const MARGINS_CANNOT_COLLAPSE = 0b0010_0000_0000_0000_0000_0000; } } /// The number of bits we must shift off to handle the text alignment field. /// /// NB: If you update this, update `TEXT_ALIGN` above. static TEXT_ALIGN_SHIFT: usize = 11; impl FlowFlags { #[inline] pub fn text_align(self) -> TextAlign { TextAlign::from_u32((self & FlowFlags::TEXT_ALIGN).bits() >> TEXT_ALIGN_SHIFT).unwrap() } #[inline] pub fn set_text_align(&mut self, value: TextAlign) { *self = (*self & !FlowFlags::TEXT_ALIGN) | FlowFlags::from_bits((value as u32) << TEXT_ALIGN_SHIFT).unwrap(); } #[inline] pub fn float_kind(&self) -> Float { if self.contains(FlowFlags::FLOATS_LEFT) { Float::Left } else if self.contains(FlowFlags::FLOATS_RIGHT) { Float::Right } else { Float::None } } #[inline] pub fn is_float(&self) -> bool { self.contains(FlowFlags::FLOATS_LEFT) || self.contains(FlowFlags::FLOATS_RIGHT) } #[inline] pub fn clears_floats(&self) -> bool { self.contains(FlowFlags::CLEARS_LEFT) || self.contains(FlowFlags::CLEARS_RIGHT) } } /// Absolutely-positioned descendants of this flow. #[derive(Clone)] pub struct AbsoluteDescendants { /// Links to every descendant. This must be private because it is unsafe to leak `FlowRef`s to /// layout. descendant_links: Vec<AbsoluteDescendantInfo>, } impl AbsoluteDescendants { pub fn new() -> AbsoluteDescendants { AbsoluteDescendants { descendant_links: Vec::new(), } } pub fn len(&self) -> usize { self.descendant_links.len() } pub fn is_empty(&self) -> bool { self.descendant_links.is_empty() } pub fn push(&mut self, given_descendant: FlowRef) { self.descendant_links.push(AbsoluteDescendantInfo { flow: given_descendant, has_reached_containing_block: false, }); } /// Push the given descendants on to the existing descendants. /// /// Ignore any static y offsets, because they are None before layout. 
pub fn push_descendants(&mut self, given_descendants: AbsoluteDescendants) { for elem in given_descendants.descendant_links { self.descendant_links.push(elem); } } /// Return an iterator over the descendant flows. pub fn iter(&mut self) -> AbsoluteDescendantIter { AbsoluteDescendantIter { iter: self.descendant_links.iter_mut(), } } /// Mark these descendants as having reached their containing block. pub fn mark_as_having_reached_containing_block(&mut self) { for descendant_info in self.descendant_links.iter_mut() { descendant_info.has_reached_containing_block = true } } } /// Information about each absolutely-positioned descendant of the given flow. #[derive(Clone)] pub struct AbsoluteDescendantInfo { /// The absolute descendant flow in question. flow: FlowRef, /// Whether the absolute descendant has reached its containing block. This exists so that we /// can handle cases like the following: /// /// ```html /// <div> /// <span id=a style="position: absolute; ...">foo</span> /// <span style="position: relative"> /// <span id=b style="position: absolute; ...">bar</span> /// </span> /// </div> /// ``` /// /// When we go to create the `InlineFlow` for the outer `div`, our absolute descendants will /// be `a` and `b`. At this point, we need a way to distinguish between the two, because the /// containing block for `a` will be different from the containing block for `b`. Specifically, /// the latter's containing block is the inline flow itself, while the former's containing /// block is going to be some parent of the outer `div`. Hence we need this flag as a way to /// distinguish the two; it will be false for `a` and true for `b`. has_reached_containing_block: bool, } pub struct AbsoluteDescendantIter<'a> { iter: IterMut<'a, AbsoluteDescendantInfo>, } impl<'a> Iterator for AbsoluteDescendantIter<'a> { type Item = &'a mut dyn Flow; fn next(&mut self) -> Option<&'a mut dyn Flow> { self.iter .next() .map(|info| FlowRef::deref_mut(&mut info.flow)) } fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() } } pub type AbsoluteDescendantOffsetIter<'a> = Zip<AbsoluteDescendantIter<'a>, IterMut<'a, Au>>; /// Information needed to compute absolute (i.e. viewport-relative) flow positions (not to be /// confused with absolutely-positioned flows) that is computed during block-size assignment. #[derive(Clone, Copy)] pub struct EarlyAbsolutePositionInfo { /// The size of the containing block for relatively-positioned descendants. pub relative_containing_block_size: LogicalSize<Au>, /// The writing mode for `relative_containing_block_size`. pub relative_containing_block_mode: WritingMode, } impl EarlyAbsolutePositionInfo { pub fn new(writing_mode: WritingMode) -> EarlyAbsolutePositionInfo { // FIXME(pcwalton): The initial relative containing block-size should be equal to the size // of the root layer. EarlyAbsolutePositionInfo { relative_containing_block_size: LogicalSize::zero(writing_mode), relative_containing_block_mode: writing_mode, } } } /// Information needed to compute absolute (i.e. viewport-relative) flow positions (not to be /// confused with absolutely-positioned flows) that is computed during final position assignment. #[derive(Clone, Copy, Serialize)] pub struct LateAbsolutePositionInfo { /// The position of the absolute containing block relative to the nearest ancestor stacking /// context. If the absolute containing block establishes the stacking context for this flow, /// and this flow is not itself absolutely-positioned, then this is (0, 0). 
pub stacking_relative_position_of_absolute_containing_block: Point2D<Au>, } impl LateAbsolutePositionInfo { pub fn new() -> LateAbsolutePositionInfo { LateAbsolutePositionInfo { stacking_relative_position_of_absolute_containing_block: Point2D::zero(), } } } #[derive(Clone, Copy, Debug)] pub struct FragmentationContext { pub available_block_size: Au, pub this_fragment_is_empty: bool, } /// Data common to all flows. pub struct BaseFlow { pub restyle_damage: RestyleDamage, /// The children of this flow. pub children: FlowList, /// Intrinsic inline sizes for this flow. pub intrinsic_inline_sizes: IntrinsicISizes, /// The upper left corner of the box representing this flow, relative to the box representing /// its parent flow. /// /// For absolute flows, this represents the position with respect to its *containing block*. /// /// This does not include margins in the block flow direction, because those can collapse. So /// for the block direction (usually vertical), this represents the *border box*. For the /// inline direction (usually horizontal), this represents the *margin box*. pub position: LogicalRect<Au>, /// The amount of overflow of this flow, relative to the containing block. Must include all the /// pixels of all the display list items for correct invalidation. pub overflow: Overflow, /// Data used during parallel traversals. /// /// TODO(pcwalton): Group with other transient data to save space. pub parallel: FlowParallelInfo, /// The floats next to this flow. pub floats: Floats, /// Metrics for floats in computed during the float metrics speculation phase. pub speculated_float_placement_in: SpeculatedFloatPlacement, /// Metrics for floats out computed during the float metrics speculation phase. pub speculated_float_placement_out: SpeculatedFloatPlacement, /// The collapsible margins for this flow, if any. pub collapsible_margins: CollapsibleMargins, /// The position of this flow relative to the start of the nearest ancestor stacking context. /// This is computed during the top-down pass of display list construction. pub stacking_relative_position: Vector2D<Au>, /// Details about descendants with position 'absolute' or 'fixed' for which we are the /// containing block. This is in tree order. This includes any direct children. pub abs_descendants: AbsoluteDescendants, /// The inline-size of the block container of this flow. Used for computing percentage and /// automatic values for `width`. pub block_container_inline_size: Au, /// The writing mode of the block container of this flow. /// /// FIXME (mbrubeck): Combine this and block_container_inline_size and maybe /// block_container_explicit_block_size into a struct, to guarantee they are set at the same /// time? Or just store a link to the containing block flow. pub block_container_writing_mode: WritingMode, /// The block-size of the block container of this flow, if it is an explicit size (does not /// depend on content heights). Used for computing percentage values for `height`. pub block_container_explicit_block_size: Option<Au>, /// Reference to the Containing Block, if this flow is absolutely positioned. pub absolute_cb: ContainingBlockLink, /// Information needed to compute absolute (i.e. viewport-relative) flow positions (not to be /// confused with absolutely-positioned flows) that is computed during block-size assignment. pub early_absolute_position_info: EarlyAbsolutePositionInfo, /// Information needed to compute absolute (i.e. 
viewport-relative) flow positions (not to be /// confused with absolutely-positioned flows) that is computed during final position /// assignment. pub late_absolute_position_info: LateAbsolutePositionInfo, /// The clipping rectangle for this flow and its descendants, in the coordinate system of the /// nearest ancestor stacking context. If this flow itself represents a stacking context, then /// this is in the flow's own coordinate system. pub clip: Rect<Au>, /// The writing mode for this flow. pub writing_mode: WritingMode, /// For debugging and profiling, the identifier of the thread that laid out this fragment. pub thread_id: u8, /// Various flags for flows, tightly packed to save space. pub flags: FlowFlags, /// The ID of the StackingContext that contains this flow. This is initialized /// to 0, but it assigned during the collect_stacking_contexts phase of display /// list construction. pub stacking_context_id: StackingContextId, /// The indices of this Flow's ClipScrollNode. This is used to place the node's /// display items into scrolling frames and clipping nodes. pub clipping_and_scrolling: Option<ClippingAndScrolling>, } impl fmt::Debug for BaseFlow { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let child_count = self.parallel.children_count.load(Ordering::SeqCst); let child_count_string = if child_count > 0 { format!("\nchildren={}", child_count) } else { "".to_owned() }; let absolute_descendants_string = if self.abs_descendants.len() > 0 { format!("\nabs-descendents={}", self.abs_descendants.len()) } else { "".to_owned() }; let damage_string = if self.restyle_damage != RestyleDamage::empty() { format!("\ndamage={:?}", self.restyle_damage) } else { "".to_owned() }; write!( f, "\nsc={:?}\ \npos={:?}{}{}\ \nfloatspec-in={:?}\ \nfloatspec-out={:?}\ \noverflow={:?}{}{}{}", self.stacking_context_id, self.position, if self.flags.contains(FlowFlags::FLOATS_LEFT) { "FL" } else { "" }, if self.flags.contains(FlowFlags::FLOATS_RIGHT) { "FR" } else { "" }, self.speculated_float_placement_in, self.speculated_float_placement_out, self.overflow, child_count_string, absolute_descendants_string, damage_string ) } } impl Serialize for BaseFlow { fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { let mut serializer = serializer.serialize_struct("base", 5)?; serializer.serialize_field("id", &self.debug_id())?; serializer.serialize_field( "stacking_relative_position", &self.stacking_relative_position, )?; serializer.serialize_field("intrinsic_inline_sizes", &self.intrinsic_inline_sizes)?; serializer.serialize_field("position", &self.position)?; serializer.serialize_field("children", &self.children)?; serializer.end() } } /// Whether a base flow should be forced to be nonfloated. This can affect e.g. `TableFlow`, which /// is never floated because the table wrapper flow is the floated one. #[derive(Clone, PartialEq)] pub enum ForceNonfloatedFlag { /// The flow should be floated if the node has a `float` property. FloatIfNecessary, /// The flow should be forced to be nonfloated. 
ForceNonfloated, } impl BaseFlow { #[inline] pub fn new( style: Option<&ComputedValues>, writing_mode: WritingMode, force_nonfloated: ForceNonfloatedFlag, ) -> BaseFlow { let mut flags = FlowFlags::empty(); match style { Some(style) => { if style.can_be_fragmented() { flags.insert(FlowFlags::CAN_BE_FRAGMENTED); } match style.get_box().position { Position::Absolute | Position::Fixed => { flags.insert(FlowFlags::IS_ABSOLUTELY_POSITIONED); let logical_position = style.logical_position(); if logical_position.inline_start == LengthPercentageOrAuto::Auto && logical_position.inline_end == LengthPercentageOrAuto::Auto { flags.insert(FlowFlags::INLINE_POSITION_IS_STATIC); } if logical_position.block_start == LengthPercentageOrAuto::Auto && logical_position.block_end == LengthPercentageOrAuto::Auto { flags.insert(FlowFlags::BLOCK_POSITION_IS_STATIC); } }, _ => flags.insert( FlowFlags::BLOCK_POSITION_IS_STATIC | FlowFlags::INLINE_POSITION_IS_STATIC, ), } if force_nonfloated == ForceNonfloatedFlag::FloatIfNecessary { match style.get_box().float { Float::None => {}, Float::Left => flags.insert(FlowFlags::FLOATS_LEFT), Float::Right => flags.insert(FlowFlags::FLOATS_RIGHT), } } match style.get_box().clear { Clear::None => {}, Clear::Left => flags.insert(FlowFlags::CLEARS_LEFT), Clear::Right => flags.insert(FlowFlags::CLEARS_RIGHT), Clear::Both => { flags.insert(FlowFlags::CLEARS_LEFT); flags.insert(FlowFlags::CLEARS_RIGHT); }, } if !style.get_counters().counter_reset.is_empty() || !style.get_counters().counter_increment.is_empty() { flags.insert(FlowFlags::AFFECTS_COUNTERS) } }, None => flags .insert(FlowFlags::BLOCK_POSITION_IS_STATIC | FlowFlags::INLINE_POSITION_IS_STATIC), } // New flows start out as fully damaged. let mut damage = RestyleDamage::rebuild_and_reflow(); damage.remove(ServoRestyleDamage::RECONSTRUCT_FLOW); BaseFlow { restyle_damage: damage, children: FlowList::new(), intrinsic_inline_sizes: IntrinsicISizes::new(), position: LogicalRect::zero(writing_mode), overflow: Overflow::new(), parallel: FlowParallelInfo::new(), floats: Floats::new(writing_mode), collapsible_margins: CollapsibleMargins::new(), stacking_relative_position: Vector2D::zero(), abs_descendants: AbsoluteDescendants::new(), speculated_float_placement_in: SpeculatedFloatPlacement::zero(), speculated_float_placement_out: SpeculatedFloatPlacement::zero(), block_container_inline_size: Au(0), block_container_writing_mode: writing_mode, block_container_explicit_block_size: None, absolute_cb: ContainingBlockLink::new(), early_absolute_position_info: EarlyAbsolutePositionInfo::new(writing_mode), late_absolute_position_info: LateAbsolutePositionInfo::new(), clip: MaxRect::max_rect(), flags: flags, writing_mode: writing_mode, thread_id: 0, stacking_context_id: StackingContextId::root(), clipping_and_scrolling: None, } } /// Update the 'flags' field when computed styles have changed. /// /// These flags are initially set during flow construction. They only need to be updated here /// if they are based on properties that can change without triggering `RECONSTRUCT_FLOW`. pub fn update_flags_if_needed(&mut self, style: &ComputedValues) { // For absolutely-positioned flows, changes to top/bottom/left/right can cause these flags // to get out of date: if self .restyle_damage .contains(ServoRestyleDamage::REFLOW_OUT_OF_FLOW) { // Note: We don't need to check whether IS_ABSOLUTELY_POSITIONED has changed, because // changes to the 'position' property trigger flow reconstruction. 
if self.flags.contains(FlowFlags::IS_ABSOLUTELY_POSITIONED) { let logical_position = style.logical_position(); self.flags.set( FlowFlags::INLINE_POSITION_IS_STATIC, logical_position.inline_start == LengthPercentageOrAuto::Auto && logical_position.inline_end == LengthPercentageOrAuto::Auto, ); self.flags.set( FlowFlags::BLOCK_POSITION_IS_STATIC, logical_position.block_start == LengthPercentageOrAuto::Auto && logical_position.block_end == LengthPercentageOrAuto::Auto, ); } } } /// Return a new BaseFlow like this one but with the given children list pub fn clone_with_children(&self, children: FlowList) -> BaseFlow { BaseFlow { children: children, restyle_damage: self.restyle_damage | ServoRestyleDamage::REPAINT | ServoRestyleDamage::REFLOW_OUT_OF_FLOW | ServoRestyleDamage::REFLOW, parallel: FlowParallelInfo::new(), floats: self.floats.clone(), abs_descendants: self.abs_descendants.clone(), absolute_cb: self.absolute_cb.clone(), clip: self.clip.clone(), ..*self } } /// Iterates over the children of this immutable flow. pub fn child_iter(&self) -> FlowListIterator { self.children.iter() } pub fn child_iter_mut(&mut self) -> MutFlowListIterator { self.children.iter_mut() } pub fn debug_id(&self) -> usize { let p = self as *const _; p as usize } pub fn flow_id(&self) -> usize { return self as *const BaseFlow as usize; } pub fn collect_stacking_contexts_for_children( &mut self, state: &mut StackingContextCollectionState, ) { for kid in self.children.iter_mut() { kid.collect_stacking_contexts(state); } } #[inline] pub fn might_have_floats_in(&self) -> bool { self.speculated_float_placement_in.left > Au(0) || self.speculated_float_placement_in.right > Au(0) } #[inline] pub fn might_have_floats_out(&self) -> bool { self.speculated_float_placement_out.left > Au(0) || self.speculated_float_placement_out.right > Au(0) } /// Compute the fragment position relative to the parent stacking context. If the fragment /// itself establishes a stacking context, then the origin of its position will be (0, 0) /// for the purposes of this computation. pub fn stacking_relative_border_box_for_display_list(&self, fragment: &Fragment) -> Rect<Au> { fragment.stacking_relative_border_box( &self.stacking_relative_position, &self .early_absolute_position_info .relative_containing_block_size, self.early_absolute_position_info .relative_containing_block_mode, CoordinateSystem::Own, ) } } impl<'a> ImmutableFlowUtils for &'a dyn Flow { /// Returns true if this flow is a block flow or subclass thereof. fn is_block_like(self) -> bool { self.class().is_block_like() } /// Returns true if this flow is a proper table child. /// 'Proper table child' is defined as table-row flow, table-rowgroup flow, /// table-column-group flow, or table-caption flow. fn is_proper_table_child(self) -> bool { match self.class() { FlowClass::TableRow | FlowClass::TableRowGroup | FlowClass::TableColGroup | FlowClass::TableCaption => true, _ => false, } } /// Returns true if this flow is a table row flow. fn is_table_row(self) -> bool { match self.class() { FlowClass::TableRow => true, _ => false, } } /// Returns true if this flow is a table cell flow.<|fim▁hole|> _ => false, } } /// Returns true if this flow is a table colgroup flow. fn is_table_colgroup(self) -> bool { match self.class() { FlowClass::TableColGroup => true, _ => false, } } /// Returns true if this flow is a table flow. fn is_table(self) -> bool { match self.class() { FlowClass::Table => true, _ => false, } } /// Returns true if this flow is a table caption flow. 
fn is_table_caption(self) -> bool { match self.class() { FlowClass::TableCaption => true, _ => false, } } /// Returns true if this flow is a table rowgroup flow. fn is_table_rowgroup(self) -> bool { match self.class() { FlowClass::TableRowGroup => true, _ => false, } } /// Returns true if this flow is one of table-related flows. fn is_table_kind(self) -> bool { match self.class() { FlowClass::TableWrapper | FlowClass::Table | FlowClass::TableColGroup | FlowClass::TableRowGroup | FlowClass::TableRow | FlowClass::TableCaption | FlowClass::TableCell => true, _ => false, } } /// Returns true if this flow has no children. fn is_leaf(self) -> bool { self.base().children.is_empty() } /// Returns the number of children that this flow possesses. fn child_count(self) -> usize { self.base().children.len() } /// Return true if this flow is a Block Container. /// /// Except for table fragments and replaced elements, block-level fragments (`BlockFlow`) are /// also block container fragments. /// Non-replaced inline blocks and non-replaced table cells are also block /// containers. fn is_block_container(self) -> bool { match self.class() { // TODO: Change this when inline-blocks are supported. FlowClass::Block | FlowClass::TableCaption | FlowClass::TableCell => { // FIXME: Actually check the type of the node self.child_count() != 0 }, _ => false, } } /// Returns true if this flow is a block flow. fn is_block_flow(self) -> bool { match self.class() { FlowClass::Block => true, _ => false, } } /// Returns true if this flow is an inline flow. fn is_inline_flow(self) -> bool { match self.class() { FlowClass::Inline => true, _ => false, } } /// Dumps the flow tree for debugging. fn print(self, title: String) { let mut print_tree = PrintTree::new(title); self.print_with_tree(&mut print_tree); } /// Dumps the flow tree for debugging into the given PrintTree. fn print_with_tree(self, print_tree: &mut PrintTree) { print_tree.new_level(format!("{:?}", self)); self.print_extra_flow_children(print_tree); for kid in self.base().child_iter() { kid.print_with_tree(print_tree); } print_tree.end_level(); } fn floats_might_flow_through(self) -> bool { if !self.base().might_have_floats_in() && !self.base().might_have_floats_out() { return false; } if self.is_root() { return false; } if !self.is_block_like() { return true; } self.as_block().formatting_context_type() == FormattingContextType::None } fn baseline_offset_of_last_line_box_in_flow(self) -> Option<Au> { for kid in self.base().children.iter().rev() { if kid.is_inline_flow() { if let Some(baseline_offset) = kid.as_inline().baseline_offset_of_last_line() { return Some(kid.base().position.start.b + baseline_offset); } } if kid.is_block_like() && !kid.base() .flags .contains(FlowFlags::IS_ABSOLUTELY_POSITIONED) { if let Some(baseline_offset) = kid.baseline_offset_of_last_line_box_in_flow() { return Some(kid.base().position.start.b + baseline_offset); } } } None } } impl<'a> MutableFlowUtils for &'a mut dyn Flow { /// Calls `repair_style` and `bubble_inline_sizes`. You should use this method instead of /// calling them individually, since there is no reason not to perform both operations. fn repair_style_and_bubble_inline_sizes(self, style: &crate::ServoArc<ComputedValues>) { self.repair_style(style); self.mut_base().update_flags_if_needed(style); self.bubble_inline_sizes(); } } impl MutableOwnedFlowUtils for FlowRef { /// Set absolute descendants for this flow. /// /// Set yourself as the Containing Block for all the absolute descendants. 
/// /// This is called during flow construction, so nothing else can be accessing the descendant /// flows. This is enforced by the fact that we have a mutable `FlowRef`, which only flow /// construction is allowed to possess. fn set_absolute_descendants(&mut self, abs_descendants: AbsoluteDescendants) { let this = self.clone(); let base = FlowRef::deref_mut(self).mut_base(); base.abs_descendants = abs_descendants; for descendant_link in base.abs_descendants.descendant_links.iter_mut() { debug_assert!(!descendant_link.has_reached_containing_block); let descendant_base = FlowRef::deref_mut(&mut descendant_link.flow).mut_base(); descendant_base.absolute_cb.set(this.clone()); } } /// Sets the flow as the containing block for all absolute descendants that have been marked /// as having reached their containing block. This is needed in order to handle cases like: /// /// ```html /// <div> /// <span style="position: relative"> /// <span style="position: absolute; ..."></span> /// </span> /// </div> /// ``` fn take_applicable_absolute_descendants( &mut self, absolute_descendants: &mut AbsoluteDescendants, ) { let mut applicable_absolute_descendants = AbsoluteDescendants::new(); for absolute_descendant in absolute_descendants.descendant_links.iter() { if absolute_descendant.has_reached_containing_block { applicable_absolute_descendants.push(absolute_descendant.flow.clone()); } } absolute_descendants .descendant_links .retain(|descendant| !descendant.has_reached_containing_block); let this = self.clone(); let base = FlowRef::deref_mut(self).mut_base(); base.abs_descendants = applicable_absolute_descendants; for descendant_link in base.abs_descendants.iter() { let descendant_base = descendant_link.mut_base(); descendant_base.absolute_cb.set(this.clone()); } } } /// A link to a flow's containing block. /// /// This cannot safely be a `Flow` pointer because this is a pointer *up* the tree, not *down* the /// tree. A pointer up the tree is unsafe during layout because it can be used to access a node /// with an immutable reference while that same node is being laid out, causing possible iterator /// invalidation and use-after-free. /// /// FIXME(pcwalton): I think this would be better with a borrow flag instead of `unsafe`. #[derive(Clone)] pub struct ContainingBlockLink { /// The pointer up to the containing block. link: Option<WeakFlowRef>, } impl ContainingBlockLink { fn new() -> ContainingBlockLink { ContainingBlockLink { link: None } } fn set(&mut self, link: FlowRef) { self.link = Some(FlowRef::downgrade(&link)) } #[inline] pub fn generated_containing_block_size(&self, for_flow: OpaqueFlow) -> LogicalSize<Au> { match self.link { None => panic!( "Link to containing block not established; perhaps you forgot to call \ `set_absolute_descendants`?" ), Some(ref link) => { let flow = link.upgrade().unwrap(); flow.generated_containing_block_size(for_flow) }, } } #[inline] pub fn explicit_block_containing_size( &self, shared_context: &SharedStyleContext, ) -> Option<Au> { match self.link { None => panic!( "Link to containing block not established; perhaps you forgot to call \ `set_absolute_descendants`?" ), Some(ref link) => { let flow = link.upgrade().unwrap(); if flow.is_block_like() { flow.as_block() .explicit_block_containing_size(shared_context) } else if flow.is_inline_flow() { Some(flow.as_inline().minimum_line_metrics.space_above_baseline) } else { None } }, } } } /// A wrapper for the pointer address of a flow. 
These pointer addresses may only be compared for /// equality with other such pointer addresses, never dereferenced. #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct OpaqueFlow(pub usize); impl OpaqueFlow { pub fn from_flow(flow: &dyn Flow) -> OpaqueFlow { let object_ptr: *const dyn Flow = flow; let data_ptr = object_ptr as *const (); OpaqueFlow(data_ptr as usize) } }<|fim▁end|>
fn is_table_cell(self) -> bool { match self.class() { FlowClass::TableCell => true,
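Each record in this dump uses the same fill-in-the-middle layout as the row just above: the prompt carries a <|file_name|> header plus the code before and after a <|fim▁hole|> marker, closed by <|fim▁end|>, and the completion holds the text that was cut out (here, the opening of is_table_cell). A minimal sketch of how one record can be spliced back into a full source file; splice_fim is an illustrative helper name, and it assumes exactly one hole per record, which nothing in the dump guarantees:

FIM_BEGIN = "<|fim▁begin|>"   # special tokens exactly as they appear in the rows
FIM_HOLE = "<|fim▁hole|>"
FIM_END = "<|fim▁end|>"

def splice_fim(prompt: str, completion: str) -> str:
    """Rebuild the original file text from one prompt/completion record."""
    body = prompt.split(FIM_BEGIN, 1)[1]      # drop the <|file_name|> header
    body = body.rsplit(FIM_END, 1)[0]         # drop the trailing end token
    prefix, suffix = body.split(FIM_HOLE, 1)  # assumes exactly one hole per record
    return prefix + completion + suffix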
<|file_name|>naver.py<|end_file_name|><|fim▁begin|># coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, update_url_query, ) class NaverIE(InfoExtractor): _VALID_URL = r'https?://(?:m\.)?tv(?:cast)?\.naver\.com/v/(?P<id>\d+)' _TESTS = [{ 'url': 'http://tv.naver.com/v/81652', 'info_dict': { 'id': '81652', 'ext': 'mp4', 'title': '[9월 모의고사 해설강의][수학_김상희] 수학 A형 16~20번', 'description': '합격불변의 법칙 메가스터디 | 메가스터디 수학 김상희 선생님이 9월 모의고사 수학A형 16번에서 20번까지 해설강의를 공개합니다.', 'upload_date': '20130903', }, }, { 'url': 'http://tv.naver.com/v/395837', 'md5': '638ed4c12012c458fefcddfd01f173cd', 'info_dict': { 'id': '395837', 'ext': 'mp4', 'title': '9년이 지나도 아픈 기억, 전효성의 아버지', 'description': 'md5:5bf200dcbf4b66eb1b350d1eb9c753f7', 'upload_date': '20150519', }, 'skip': 'Georestricted', }, { 'url': 'http://tvcast.naver.com/v/81652',<|fim▁hole|> 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) vid = self._search_regex( r'videoId["\']\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'video id', fatal=None, group='value') in_key = self._search_regex( r'inKey["\']\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'key', default=None, group='value') if not vid or not in_key: error = self._html_search_regex( r'(?s)<div class="(?:nation_error|nation_box|error_box)">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>', webpage, 'error', default=None) if error: raise ExtractorError(error, expected=True) raise ExtractorError('couldn\'t extract vid and key') video_data = self._download_json( 'http://play.rmcnmv.naver.com/vod/play/v2.0/' + vid, video_id, query={ 'key': in_key, }) meta = video_data['meta'] title = meta['subject'] formats = [] def extract_formats(streams, stream_type, query={}): for stream in streams: stream_url = stream.get('source') if not stream_url: continue stream_url = update_url_query(stream_url, query) encoding_option = stream.get('encodingOption', {}) bitrate = stream.get('bitrate', {}) formats.append({ 'format_id': '%s_%s' % (stream.get('type') or stream_type, encoding_option.get('id') or encoding_option.get('name')), 'url': stream_url, 'width': int_or_none(encoding_option.get('width')), 'height': int_or_none(encoding_option.get('height')), 'vbr': int_or_none(bitrate.get('video')), 'abr': int_or_none(bitrate.get('audio')), 'filesize': int_or_none(stream.get('size')), 'protocol': 'm3u8_native' if stream_type == 'HLS' else None, }) extract_formats(video_data.get('videos', {}).get('list', []), 'H264') for stream_set in video_data.get('streams', []): query = {} for param in stream_set.get('keys', []): query[param['name']] = param['value'] stream_type = stream_set.get('type') videos = stream_set.get('videos') if videos: extract_formats(videos, stream_type, query) elif stream_type == 'HLS': stream_url = stream_set.get('source') if not stream_url: continue formats.extend(self._extract_m3u8_formats( update_url_query(stream_url, query), video_id, 'mp4', 'm3u8_native', m3u8_id=stream_type, fatal=False)) self._sort_formats(formats) subtitles = {} for caption in video_data.get('captions', {}).get('list', []): caption_url = caption.get('source') if not caption_url: continue subtitles.setdefault(caption.get('language') or caption.get('locale'), []).append({ 'url': caption_url, }) upload_date = self._search_regex( r'<span[^>]+class="date".*?(\d{4}\.\d{2}\.\d{2})', webpage, 'upload date', fatal=False) if upload_date: upload_date = 
upload_date.replace('.', '') return { 'id': video_id, 'title': title, 'formats': formats, 'subtitles': subtitles, 'description': self._og_search_description(webpage), 'thumbnail': meta.get('cover', {}).get('source') or self._og_search_thumbnail(webpage), 'view_count': int_or_none(meta.get('count')), 'upload_date': upload_date, }<|fim▁end|>
<|file_name|>control.py<|end_file_name|><|fim▁begin|>import os import rospy import rospkg from python_qt_binding import QT_BINDING from rqt_gui_py.plugin import Plugin from python_qt_binding import loadUi from python_qt_binding.QtCore import QTimer # Attempt to load QWidget from pyqt4 try: from python_qt_binding.QtGui import QWidget # if not load from pyqt5 except ImportError: from python_qt_binding.QtWidgets import QWidget from robosub_msgs.msg import control, control_status state_types = { 0: "NONE", 1: "ABSOLUTE", 2: "RELATIVE", 3: "ERROR" } class Control(Plugin): def __init__(self, context): super(Control, self).__init__(context) # Give QObjects reasonable names self.setObjectName('Control') # Create QWidget self._widget = QWidget() # Get path to UI file which should be in the "resource" folder of # this package ui_file = os.path.join(rospkg.RosPack().get_path('robosub'), 'src/rqt/rqt_control/resource', 'Control.ui') # Extend the widget with all attributes and children from UI file loadUi(ui_file, self._widget) self.control_timer = QTimer(self) self.control_timer.timeout.connect(self.control_missed) self.control_timer.start(1000) self.control_status_timer = QTimer(self) self.control_status_timer.timeout.connect(self.control_status_missed) self.control_status_timer.start(1000) # Give QObjects reasonable names self._widget.setObjectName('Control') if context.serial_number() > 1: self._widget.setWindowTitle(self._widget.windowTitle() + (' (%d)' % context.serial_number())) # Add widget to the user interface context.add_widget(self._widget) self._widget.statusActive.hide() self._widget.controlActive.hide() self.con_sub = rospy.Subscriber('control', control, self.control_callback, queue_size=1) self.cs_sub = rospy.Subscriber('control_status', control_status, self.control_status_callback, queue_size=1) img_file = os.path.join(rospkg.RosPack().get_path('robosub'), 'src/rqt/resource/robosub_logo.png') self._widget.setStyleSheet(".QWidget {background-image: url(" + img_file + "); background-repeat: no-repeat;" + "background-position:bottom right}") def control_missed(self): if not self._widget.controlStale.isVisible(): self._widget.controlStale.show() self._widget.controlActive.hide() def control_status_missed(self): if not self._widget.statusStale.isVisible(): self._widget.statusStale.show() self._widget.statusActive.hide() def control_status_callback(self, m): try: self.control_status_timer.stop() except RuntimeError: pass if self._widget.statusStale.isVisible(): self._widget.statusStale.setVisible(False) self._widget.statusActive.setVisible(True) # Set the states self._widget.forwardStatusState.setText(m.forward_state) self._widget.strafeStatusState.setText(m.strafe_left_state) self._widget.diveStatusState.setText(m.dive_state) self._widget.rollStatusState.setText(m.roll_right_state) self._widget.pitchStatusState.setText(m.pitch_down_state) self._widget.yawStatusState.setText(m.yaw_left_state) self._widget.forwardGoal.setText("{:.4f}".format(m.forward_goal)) self._widget.strafeGoal.setText("{:.4f}".format(m.strafe_left_goal))<|fim▁hole|> self._widget.diveGoal.setText("{:.4f}".format(m.dive_goal)) self._widget.rollGoal.setText("{:.4f}".format(m.roll_right_goal)) self._widget.pitchGoal.setText("{:.4f}".format(m.pitch_down_goal)) self._widget.yawGoal.setText("{:.4f}".format(m.yaw_left_goal)) self.control_status_timer.start(1000) def control_callback(self, m): try: self.control_timer.stop() except RuntimeError: pass if self._widget.controlStale.isVisible(): self._widget.controlStale.hide() 
self._widget.controlActive.show() # Set the states self._widget.forwardState.setText(state_types[m.forward_state]) self._widget.strafeState.setText(state_types[m.strafe_state]) self._widget.diveState.setText(state_types[m.dive_state]) self._widget.rollState.setText(state_types[m.roll_state]) self._widget.pitchState.setText(state_types[m.pitch_state]) self._widget.yawState.setText(state_types[m.yaw_state]) self._widget.forwardValue.setText("{:.4f}".format(m.forward)) self._widget.strafeValue.setText("{:.4f}".format(m.strafe_left)) self._widget.diveValue.setText("{:.4f}".format(m.dive)) self._widget.rollValue.setText("{:.4f}".format(m.roll_right)) self._widget.pitchValue.setText("{:.4f}".format(m.pitch_down)) self._widget.yawValue.setText("{:.4f}".format(m.yaw_left)) self.control_timer.start(1000) def shutdown_plugin(self): self.cs_sub.unregister() self.con_sub.unregister() self.control_timer.stop() self.control_status_timer.stop() def save_settings(self, plugin_settings, instance_settings): # TODO save intrinsic configuration, usually using: # instance_settings.set_value(k, v) pass def restore_settings(self, plugin_settings, instance_settings): # TODO restore intrinsic configuration, usually using: # v = instance_settings.value(k) pass<|fim▁end|>
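Both topic callbacks in the rqt plugin above rely on the same liveness trick: a one-second QTimer is restarted on every incoming message, so its timeout only fires after a silent second and flips the widget from Active to Stale. The same watchdog pattern, reduced to a sketch on plain threading so it runs outside Qt; StaleWatchdog and the wiring below are illustrative names, not part of the plugin:

import threading

class StaleWatchdog:
    """Flags a source as stale when no message arrives within `timeout` seconds."""
    def __init__(self, timeout=1.0, on_stale=lambda: None):
        self.timeout = timeout
        self.on_stale = on_stale
        self._timer = None

    def kick(self):  # call this from every message callback
        if self._timer is not None:
            self._timer.cancel()             # the quiet period starts over
        self._timer = threading.Timer(self.timeout, self.on_stale)
        self._timer.daemon = True
        self._timer.start()

watchdog = StaleWatchdog(on_stale=lambda: print("control topic went stale"))
watchdog.kick()  # e.g. at the top of control_callback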
<|file_name|>fluidsub_test.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- """ Tests. """ import unittest from bruges.rockphysics import fluidsub # Inputs... GAS case vp_gas = 2429.0 vs_gas = 1462.4 rho_gas = 2080. # Expected outputs... BRINE case vp_brine = 2850.5 vs_brine = 1416.1 rho_brine = 2210.0 phi = 0.275 # Don't know this... reading from fig rhohc = 250.0 # gas rhow = 1040.0 # brine sw = 0.3 # Don't know this... just guessing swnew = 1.0 # Don't know this... just guessing khc = 207000000.0 # gas kw = 2950000000.0 # brine kclay = 25000000000.0 kqtz = 37000000000.0 vclay = 0.05 kmin = 36266406250.0 # Don't know this... reading from fig class FluidsubTest(unittest.TestCase): """ Tests fluid sub calculations against Smith et al 2003. https://dl.dropboxusercontent.com/u/14965965/Smith_etal_2003.pdf """ def test_avseth(self): # Base case: gas # Subbing with: brine sub = fluidsub.avseth_fluidsub(vp=vp_gas, vs=vs_gas, rho=rho_gas, phi=phi, rhof1=rhohc, rhof2=rhow, kmin=37000000000, kf1=khc, kf2=kw) self.assertAlmostEqual(sub[0], vp_brine, places=-1) # Cannot match :( self.assertAlmostEqual(sub[1], vs_brine, places=-1) # Cannot match :( self.assertAlmostEqual(sub[2], rho_brine, places=-1) # Cannot match :( def test_smith(self): # Base case: gas # Subbing with: brine sub = fluidsub.smith_fluidsub(vp=vp_gas, vs=vs_gas, rho=rho_gas, phi=phi, rhohc=rhohc,<|fim▁hole|> kw=kw, kclay=kclay, kqtz=kqtz, vclay=vclay) self.assertAlmostEqual(sub[0], vp_brine, places=-1) self.assertAlmostEqual(sub[1], vs_brine, places=-1) self.assertAlmostEqual(sub[2], rho_brine, places=-1) # Cannot match :( if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(FluidsubTest) unittest.TextTestRunner(verbosity=2).run(suite)<|fim▁end|>
rhow=rhow, sw=sw, swnew=swnew, khc=khc,
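The numbers wired into the test above (khc, kw, kmin, phi and the rest) are the inputs of Gassmann's relation, which fluid-substitution routines of this kind are built around. For reference, the textbook equation in a few lines; this is a sketch of the standard formula, not the bruges implementation, and the kdry value below is an invented placeholder:

def gassmann_ksat(kdry, kmin, kfluid, phi):
    """Saturated bulk modulus K_sat from the dry-rock modulus (Gassmann)."""
    num = (1.0 - kdry / kmin) ** 2
    den = phi / kfluid + (1.0 - phi) / kmin - kdry / kmin ** 2
    return kdry + num / den

# brine case with the moduli quoted in the test; kdry is assumed, all in Pa
ksat = gassmann_ksat(kdry=12.0e9, kmin=36266406250.0, kfluid=2950000000.0, phi=0.275)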
<|file_name|>signus.rs<|end_file_name|><|fim▁begin|>use futures::Future; use indy::did; use settings; use utils::libindy::wallet::get_wallet_handle; use error::prelude::*;<|fim▁hole|> if settings::indy_mocks_enabled() { return Ok((::utils::constants::DID.to_string(), ::utils::constants::VERKEY.to_string())); } let my_did_json = json!({"seed": seed, "method_name": method_name}); did::create_and_store_my_did(get_wallet_handle(), &my_did_json.to_string()) .wait() .map_err(VcxError::from) } pub fn get_local_verkey(did: &str) -> VcxResult<String> { if settings::indy_mocks_enabled() { return Ok(::utils::constants::VERKEY.to_string()); } did::key_for_local_did(get_wallet_handle(), did) .wait() .map_err(VcxError::from) }<|fim▁end|>
pub fn create_and_store_my_did(seed: Option<&str>, method_name: Option<&str>) -> VcxResult<(String, String)> {
<|file_name|>_separatethousands.py<|end_file_name|><|fim▁begin|>import _plotly_utils.basevalidators <|fim▁hole|> self, plotly_name="separatethousands", parent_name="scattermapbox.marker.colorbar", **kwargs ): super(SeparatethousandsValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "calc"), **kwargs )<|fim▁end|>
class SeparatethousandsValidator(_plotly_utils.basevalidators.BooleanValidator): def __init__(
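The generated class above encodes one fact: scattermapbox.marker.colorbar.separatethousands is a boolean attribute with edit type "calc". User code reaches it through the matching attribute nesting; a small sketch with arbitrary data values:

import plotly.graph_objects as go

fig = go.Figure(go.Scattermapbox(
    lat=[45.50, 45.51], lon=[-73.57, -73.58], mode="markers",
    marker=dict(
        color=[1000, 2500000],  # numeric data drives the colorbar
        showscale=True,
        colorbar=dict(separatethousands=True),  # the attribute this validator guards
    ),
))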
<|file_name|>guard.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ //! Machinery to conditionally expose things. use js::jsapi::JSContext; use js::rust::HandleObject; use servo_config::prefs::PREFS; /// A container with a condition. pub struct Guard<T: Clone + Copy> { condition: Condition, value: T, } impl<T: Clone + Copy> Guard<T> { /// Construct a new guarded value. pub const fn new(condition: Condition, value: T) -> Self { Guard { condition: condition, value: value, } } /// Expose the value if the condition is satisfied. /// /// The passed handle is the object on which the value may be exposed. pub unsafe fn expose(&self, cx: *mut JSContext, obj: HandleObject) -> Option<T> { if self.condition.is_satisfied(cx, obj) { Some(self.value) } else { None } } }<|fim▁hole|>pub enum Condition { /// The condition is satisfied if the function returns true. Func(unsafe fn(*mut JSContext, HandleObject) -> bool), /// The condition is satisfied if the preference is set. Pref(&'static str), /// The condition is always satisfied. Satisfied, } impl Condition { unsafe fn is_satisfied(&self, cx: *mut JSContext, obj: HandleObject) -> bool { match *self { Condition::Pref(name) => PREFS.get(name).as_boolean().unwrap_or(false), Condition::Func(f) => f(cx, obj), Condition::Satisfied => true, } } }<|fim▁end|>
/// A condition to expose things.
<|file_name|>process_start_request.go<|end_file_name|><|fim▁begin|>package dto <|fim▁hole|>type ProcessStartRequest struct { Variables VariableList `json:"variables"` }<|fim▁end|>
<|file_name|>draw_cache.rs<|end_file_name|><|fim▁begin|>use std::cell::RefCell; use std::fmt; use ggez::{ graphics::{BlendMode, DrawParam, Drawable}, Context, GameResult, }; pub trait TryIntoDrawable<T> where T: Drawable, { fn try_into_drawable(&self, ctx: &mut Context) -> GameResult<T>; } pub struct DrawCache<T, U> where T: TryIntoDrawable<U>, U: Drawable, { data: T, cache: RefCell<Option<U>>, } impl<T, U> DrawCache<T, U> where T: TryIntoDrawable<U>, U: Drawable, { pub fn new(data: T) -> Self { Self { data, cache: RefCell::new(None), } } } impl<T, U> AsRef<T> for DrawCache<T, U> where T: TryIntoDrawable<U>, U: Drawable, { fn as_ref(&self) -> &T { &self.data } } impl<T, U> fmt::Debug for DrawCache<T, U> where T: TryIntoDrawable<U> + fmt::Debug, U: Drawable, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", self.data) } } <|fim▁hole|> T: TryIntoDrawable<U>, U: Drawable, { fn draw_ex(&self, ctx: &mut Context, param: DrawParam) -> GameResult<()> { let is_cached = self.cache.borrow().is_some(); if !is_cached { let drawable = self.data.try_into_drawable(ctx)?; self.cache.replace(Some(drawable)); } self.cache .borrow_mut() .as_mut() .unwrap() .draw_ex(ctx, param) } fn set_blend_mode(&mut self, mode: Option<BlendMode>) { if let Some(ref mut drawable) = *self.cache.get_mut() { drawable.set_blend_mode(mode); } } fn get_blend_mode(&self) -> Option<BlendMode> { match *self.cache.borrow() { None => None, Some(ref drawable) => drawable.get_blend_mode(), } } }<|fim▁end|>
impl<T, U> Drawable for DrawCache<T, U> where
<|file_name|>move_export.py<|end_file_name|><|fim▁begin|>from openerp import models, api, _ from openerp.exceptions import UserError class ExportMoveExport(models.TransientModel): _name = 'export.move.export' _description = 'Export Moves' @api.multi<|fim▁hole|> def create_export_file(self): context = dict(self._context or {}) moves = self.env['export.move'].browse(context.get('active_ids')) export_to_create = self.env['export.move'] for move in moves: if move.state == 'created': export_to_create += move if not export_to_create: raise UserError(_('There is no posted move item to create a Export-File.')) export_to_create.action_create_export_file() return {'type': 'ir.actions.act_window_close'}<|fim▁end|>
<|file_name|>gulpfile.js<|end_file_name|><|fim▁begin|><|fim▁hole|> gulp.task('test', function() { new karma.Server({configFile: __dirname + '/karma.conf.js', reporters: 'dots'}).start(); var content = fs.readFileSync('copyAndRenameTest.js'); console.log(content); });<|fim▁end|>
var gulp = require('gulp'); var karma = require('karma'); var fs = require('fs');
<|file_name|>453.cpp<|end_file_name|><|fim▁begin|>class Solution {<|fim▁hole|> sort(nums.begin(), nums.end()); int ans = 0; for(int i = 1; i < nums.size(); i++){ ans += nums[i] - nums[0]; } return ans; } };<|fim▁end|>
public: int minMoves(vector<int>& nums) {
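The sort in the solution above exists only to put the minimum at nums[0]; what is summed is each element's distance to that minimum. The underlying identity is that incrementing n-1 elements by one is, relative to making all elements equal, the same as decrementing a single element by one, so the answer is the total distance down to the minimum — which also yields an O(n) version without sorting:

def min_moves(nums):
    # raising n-1 elements once == lowering one element once,
    # so count how far every element sits above the minimum
    lo = min(nums)
    return sum(x - lo for x in nums)

assert min_moves([1, 2, 3]) == 3  # the classic example: 3 moves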
<|file_name|>0005_auto_20180717_1615.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Generated by Django 1.11.14 on 2018-07-17 21:15 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('book', '0004_auto_20180717_1547'), ] operations = [ migrations.AddField( model_name='book',<|fim▁hole|> field=models.CharField(blank=True, max_length=200, null=True, verbose_name='Auteurs'), ), migrations.AddField( model_name='book', name='contribution', field=models.BooleanField(default=True, verbose_name='Contribution des auteurs de type « direction » ?'), preserve_default=False, ), ]<|fim▁end|>
name='authors',
<|file_name|>TerminateInstanceInAutoScalingGroupResult.java<|end_file_name|><|fim▁begin|>/* * Copyright 2010-2011 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package com.amazonaws.services.autoscaling.model; /** * <p> * The output for the TerminateInstanceInAutoScalingGroup action. * </p> */ public class TerminateInstanceInAutoScalingGroupResult { /** * A Scaling Activity. */ private Activity activity; /** * A Scaling Activity.<|fim▁hole|> * * @return A Scaling Activity. */ public Activity getActivity() { return activity; } /** * A Scaling Activity. * * @param activity A Scaling Activity. */ public void setActivity(Activity activity) { this.activity = activity; } /** * A Scaling Activity. * <p> * Returns a reference to this object so that method calls can be chained together. * * @param activity A Scaling Activity. * * @return A reference to this updated object so that method calls can be chained * together. */ public TerminateInstanceInAutoScalingGroupResult withActivity(Activity activity) { this.activity = activity; return this; } /** * Returns a string representation of this object; useful for testing and * debugging. * * @return A string representation of this object. * * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{"); sb.append("Activity: " + activity + ", "); sb.append("}"); return sb.toString(); } }<|fim▁end|>
<|file_name|>tree_reduce.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
## @file ostap/frames/tree_reduce.py
#  Helper module to "Reduce" tree using frames
#  @see Ostap::DataFrame
#  @see ROOT::RDataFrame
#  @author Vanya BELYAEV [email protected]
#  @date 2018-06-16
# =============================================================================
"""Helper module to ``reduce'' tree using frames
- see Ostap.DataFrame
- see ROOT.ROOT.RDataFrame
"""
# =============================================================================
__version__ = "$Revision$"
__author__  = "Vanya BELYAEV [email protected]"
__date__    = "2011-06-07"
__all__     = (
    'ReduceTree' ,
    'reduce'     ,
    )
# =============================================================================
import ROOT, os
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ : logger = getLogger( 'ostap.frames.tree_reduce' )
else                       : logger = getLogger( __name__ )
# =============================================================================
logger.debug ( "``Reduce'' TTree using ROOT::RDataFrame object")
# =============================================================================
import ostap.trees.trees
from ostap.core.core import cpp, Ostap
from ostap.utils.cleanup import CleanUp
# =============================================================================
## @class ReduceTree
#  Reduce TTree object using intermediate (temporary) file
#  @code
#  tree    = ...
#  r       = ReduceTree ( tree , cuts , [ 'px', 'py', 'pz' ] , 'new_file.root' )
#  reduced = r.tree
#  @endcode
class ReduceTree(CleanUp):
    """Reduce ROOT.TTree object
    >>> tree    = ...
    >>> r       = ReduceTree ( tree , cuts , [ 'px', 'py', 'pz' ] )
    >>> reduced = r.tree
    """
    def __init__ ( self               ,
                   chain              ,   ## input TChain/TTree
                   selection  = {}    ,   ## selection/cuts
                   save_vars  = ()    ,   ## list of variables to save
                   new_vars   = {}    ,   ## new variables
                   no_vars    = ()    ,   ## exclude these variables
                   ##
                   output     = ''    ,   ## output file name
                   name       = ''    ,   ## the name
                   addselvars = False ,   ## add variables from selections?
                   tmp_keep   = False ,   ## keep the temporary file
                   silent     = False ):  ## silent processing

        from ostap.frames.frames import DataFrame
        frame  = DataFrame ( chain )
        report = None

        self.__frame_main = frame

        if not silent :
            pbar = frame.ProgressBar ( len ( chain ) )

        nvars = []   ## new variables
        for nv in new_vars :
            frame = frame.Define ( nv , new_vars [ nv ] )
            nvars.append ( nv )

        from ostap.core.ostap_types import ( string_types   ,
                                             listlike_types ,
                                             dictlike_types )

        cut_types = string_types + ( ROOT.TCut , )

        Lmax       = 30
        selections = []

        if selection and isinstance ( selection , cut_types ) :
            ss = str ( selection ).strip()
            if len ( ss ) < Lmax : filter_name = ss
            else                 : filter_name = 'SELECTION'
            frame = frame.Filter ( ss , filter_name )
            selections.append ( ss )
        elif selection and isinstance ( selection , dictlike_types ) :
            for filter_name in selection :
                s = selection [ filter_name ]
                assert isinstance ( s , cut_types ),\
                       'Invalid selection type %s/%s' % ( s , type ( s ) )
                ss = str ( s ).strip()
                frame = frame.Filter ( ss , str ( filter_name ) )
                selections.append ( ss )
        elif selection and isinstance ( selection , listlike_types ) :
            for i , s in enumerate ( selection ) :
                assert isinstance ( s , cut_types ),\
                       'Invalid selection type %s/%s' % ( s , type ( s ) )
                ss = str ( s ).strip()
                ##
                if len ( ss ) < Lmax : filter_name = ss
                else                 : filter_name = 'SELECTION%d' % i
                #
                frame = frame.Filter ( ss , filter_name )
                selections.append ( ss )
        elif selection :
            raise TypeError('Invalid selection type %s/%s' % ( selection , type ( selection ) ) )

        if not output :
            output = self.tempfile ( prefix = 'ostap-frame-' , suffix = '.root' )
            ## logger.debug ( 'ReduceTree: output file is %s' % output )
            if not tmp_keep : self.trash.add ( output )

        ##
        if selections : report = frame.Report()

        if selections and addselvars :
            bvars     = chain.the_variables ( selections )
            save_vars = list ( bvars ) + [ v for v in save_vars if not v in bvars ]
            save_vars = tuple ( save_vars )

        ## exclude some variables
        if no_vars and not save_vars :
            bvars     = list ( chain.branches () )
            all_vars  = list ( bvars ) + [ v for v in nvars if not v in bvars ]
            save_vars = tuple ( [ v for v in all_vars if not v in no_vars ] )
        elif no_vars :
            bvars     = chain.the_variables ( *save_vars )
            all_vars  = list ( bvars ) + [ v for v in nvars if not v in bvars ]
            save_vars = tuple ( [ v for v in all_vars if not v in no_vars ] )

        nb_ = len ( chain.branches () )
        ne_ = len ( chain )

        ## chain name:
        ## FIXME!
        # cname = chain.GetName() ## produces ROOT error
        if not name :
            _ , _ , cname = chain.GetName().rpartition ( '/' )
            name = '%s_reduced' % cname

        self.__name = name

        if not save_vars :
            snapshot = frame.Snapshot ( name , output )
        else :
            bvars    = chain.the_variables ( *save_vars )
            all_vars = list ( bvars ) + [ v for v in nvars if not v in bvars ]
            from ostap.core.core import strings as _strings
            all_vars = _strings ( all_vars )
            snapshot = frame.Snapshot ( name , output , all_vars )

        assert os.path.exists ( output ) and\
               os.path.isfile ( output ) , 'Invalid file %s' % output

        self.__chain = ROOT.TChain ( name )
        self.__chain.Add ( output )
        self.__output = output

        self.__report = 'Tree -> Frame -> Tree filter/transformation'
        self.__table = []
        if report :
            from ostap.frames.frames import report_print, report_as_table
            title = self.__report
            self.__report += '\n%s' % report_print ( report , title , '# ')
            self.__table = report_as_table ( report )

        fs = os.path.getsize ( self.__output )
        gb , r = divmod ( fs , 1024 * 1024 * 1024 )
        mb , r = divmod ( r  , 1024 * 1024 )
        kb , r = divmod ( r  , 1024 )

        if   gb : fs = '%.1fGB' % ( float ( fs ) / 1024 / 1024 / 1024 )
        elif mb : fs = '%.1fMB' % ( float ( fs ) / 1024 / 1024 )
        elif kb : fs = '%.1fkB' % ( float ( fs ) / 1024 )
        else    : fs = '%sB'    % fs

        nb = len ( self.__chain.branches () )
        ne = len ( self.__chain )

        self.__report += '\n# Reduce %d -> %d branches, %d -> %d entries' % ( nb_ , nb , ne_ , ne )
        self.__report += '\n# Output:%s size:%s' % ( self.__output , fs )
        self.__report += '\n# %s' % str ( self.__chain )

        del self.__frame_main

    def __str__ ( self ) : return self.__report
    def __repr__ ( self ) : return self.__report

    @property
    def output ( self ) :
        """``output'' : the output file name"""
        return self.__output

    @property
    def chain ( self ) :
        """``chain'': the reduced chain/tree (same as tree)"""
        return self.__chain

    @property
    def name ( self ) :
        """``name'' : the output chain name"""
        return self.__name

    @property
    def tree ( self ) :
        """``tree'': the reduced chain/tree (same as chain)"""
        return self.__chain

    @property
    def table ( self ) :
        """``table'' : get the statistics as table"""
        return self.__table

    @property
    def report ( self ) :
        """``report'' : get the statistics report"""
        return self.__report

# ===============================================================================
## Powerful method to reduce/transform the tree/chain.
#  It relies on Ostap.DataFrame ( alias for ROOT.ROOT.DataFrame) and allows
#  - filter entries from TTree/TChain
#  - add new columns
#  - remove unnecessary columns
#  @code
#  tree = ....
#  reduced1 = tree.reduce ( 'pt>1' )
#  reduced2 = tree.reduce ( 'pt>1' , save_vars = [ 'p', 'pt' ,'q' ] )
#  reduced3 = tree.reduce ( 'pt>1' , no_vars = [ 'Q', 'z' ,'x' ] )
#  reduced4 = tree.reduce ( 'pt>1' , new_vars = { 'pt2' : 'pt*pt' } )
#  reduced5 = tree.reduce ( 'pt>1' , new_vars = { 'pt2' : 'pt*pt' } , output = 'OUTPUT.root' )
#  @endcode
#  @see Ostap::DataFrame
#  @see ROOT::RDataFrame
def reduce ( tree               ,
             selection          ,
             save_vars  = ()    ,
             new_vars   = {}    ,
             no_vars    = ()    ,
             output     = ''    ,
             name       = ''    ,
             addselvars = False ,
             silent     = False ) :
    """ Powerful method to reduce/transform the tree/chain.
    It relies on Ostap.DataFrame ( alias for ROOT.ROOT.DataFrame) and allows
    - filter entries from TTree/TChain
    - add new columns
    - remove unnecessary columns

    >>> tree = ....
    >>> reduced1 = tree.reduce ( 'pt>1' )
    >>> reduced2 = tree.reduce ( 'pt>1' , save_vars = [ 'p', 'pt' ,'q' ] )
    >>> reduced3 = tree.reduce ( 'pt>1' , no_vars = [ 'Q', 'z' ,'x' ] )
    >>> reduced4 = tree.reduce ( 'pt>1' , new_vars = { 'pt2' : 'pt*pt' } )
    >>> reduced5 = tree.reduce ( 'pt>1' , new_vars = { 'pt2' : 'pt*pt' } , output = 'OUTPUT.root' )
    """
    nb0 = len ( tree.branches() )
    ne0 = len ( tree )

    reduced = ReduceTree ( tree                    ,
                           selection  = selection  ,
                           save_vars  = save_vars  ,
                           new_vars   = new_vars   ,
                           no_vars    = no_vars    ,
                           output     = output     ,
                           name       = name       ,
                           addselvars = addselvars ,
                           tmp_keep   = True       ,
                           silent     = silent     )

    from ostap.trees.trees import Chain
<|fim▁hole|>
    if not output : result.trash.add ( reduced.output )

    if silent :
        nb = len ( result.chain.branches() )
        ne = len ( result.chain )
        f  = float ( nb0 * ne0 ) / ( nb * ne )
        logger.info ( 'reduce: (%dx%d) -> (%dx%d) %.1f (branches x entries) ' % ( nb0 , ne0 , nb , ne , f ) )

    return result

ROOT.TTree.reduce = reduce

# =============================================================================
_decorated_classes_ = (
    ROOT.TTree ,
)
_new_methods_ = (
    ROOT.TTree.reduce ,
)
# =============================================================================
if '__main__' == __name__ :
    from ostap.utils.docme import docme
    docme ( __name__ , logger = logger )
# =============================================================================
# The END
# =============================================================================<|fim▁end|>
result = Chain ( reduced.chain )
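The row above is ostap's ReduceTree, whose job is to filter entries, derive new columns, and drop unwanted ones before writing a slimmed tree. As a hedged, ROOT-free sketch of that same pipeline, here is a pure-Python analogue over a list of dicts; the helper name reduce_rows and the toy rows are illustrative only and are not part of ostap.

def reduce_rows(rows, selection, new_vars=None, no_vars=()):
    """Filter rows, add derived columns, then drop unwanted columns."""
    new_vars = new_vars or {}
    out = []
    for row in rows:
        if not selection(row):
            continue                      # like frame.Filter(...)
        row = dict(row)
        for name, fn in new_vars.items():
            row[name] = fn(row)           # like frame.Define(...)
        for name in no_vars:
            row.pop(name, None)           # like dropping branches
        out.append(row)
    return out

rows = [{'pt': 0.5, 'q': 1}, {'pt': 2.0, 'q': -1}]
print(reduce_rows(rows, lambda r: r['pt'] > 1,
                  new_vars={'pt2': lambda r: r['pt'] ** 2},
                  no_vars=('q',)))        # -> [{'pt': 2.0, 'pt2': 4.0}]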
<|file_name|>FileInputStreamTest.java<|end_file_name|><|fim▁begin|>package org.github.mervyn.io;

import static org.junit.Assert.*;

<|fim▁hole|>import java.io.IOException;
import java.io.InputStream;

import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * @ClassName: FileInputStreamTest
 * @Description: Tests for FileInputStream
 * @author: mervyn
 * @date 2015-12-26 11:28:07 AM
 *
 */
public class FileInputStreamTest {

    protected Logger logger = LoggerFactory.getLogger(this.getClass());

    /**
     * testFileInputStream1 (tests the FileInputStream(File file) constructor, which builds a node input stream)
     * TODO (describe when this method applies - optional)
     * TODO (describe this method's execution flow - optional)
     * TODO (describe how to use this method - optional)
     * TODO (describe this method's caveats - optional)
     *
     * @Title: testFileInputStream
     * @Description: Tests the FileInputStream(File file) constructor, which builds a node input stream
     * @param @throws IOException
     * @return void
     * @throws
     */
    @Test
    public void testFileInputStream1() throws IOException{
        String pathname = "src" + File.separator + "test" + File.separator + "resources" + File.separator + "test.txt";
        File f = new File(pathname);
        // Build a FileInputStream
        InputStream in = new FileInputStream(f);
        byte[] b = new byte[(int) f.length()];
        // Read from the input stream: all bytes in a single call
        int len = in.read(b);
        // Close the input stream
        in.close();
        logger.debug("Read " + len + " bytes of data");
        logger.debug("The data read is: " + new String(b));
    }
}<|fim▁end|>
import java.io.File; import java.io.FileInputStream;
<|file_name|>lt.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>mod tests { // impl<A, B> PartialEq<[B]> for [A] where A: PartialEq<B> { // fn eq(&self, other: &[B]) -> bool { // self.len() == other.len() && // order::eq(self.iter(), other.iter()) // } // fn ne(&self, other: &[B]) -> bool { // self.len() != other.len() || // order::ne(self.iter(), other.iter()) // } // } // impl<T: Eq> Eq for [T] {} // impl<T: PartialOrd> PartialOrd for [T] { // #[inline] // fn partial_cmp(&self, other: &[T]) -> Option<Ordering> { // order::partial_cmp(self.iter(), other.iter()) // } // #[inline] // fn lt(&self, other: &[T]) -> bool { // order::lt(self.iter(), other.iter()) // } // #[inline] // fn le(&self, other: &[T]) -> bool { // order::le(self.iter(), other.iter()) // } // #[inline] // fn ge(&self, other: &[T]) -> bool { // order::ge(self.iter(), other.iter()) // } // #[inline] // fn gt(&self, other: &[T]) -> bool { // order::gt(self.iter(), other.iter()) // } // } // impl<T: Ord> Ord for [T] { // fn cmp(&self, other: &[T]) -> Ordering { // order::cmp(self.iter(), other.iter()) // } // } type T = i32; // T: Ord #[test] fn lt_test1() { let slice: &[T] = &[1, 2]; let other: &[T] = &[2, 1, 0]; let result: bool = slice.lt(other); assert_eq!(result, true); } #[test] fn lt_test2() { let slice: &[T] = &[1, 2]; let other: &[T] = &[2, 1, 0]; let result: bool = slice < other; assert_eq!(result, true); } #[test] fn lt_test3() { let slice: &[T] = &[3, 2]; let other: &[T] = &[2, 1, 0]; let result: bool = slice.lt(other); assert_eq!(result, false); } #[test] fn lt_test4() { let slice: &[T] = &[3, 2]; let other: &[T] = &[2, 1, 0]; let result: bool = slice < other; assert_eq!(result, false); } #[test] fn lt_test5() { let slice: &[T] = &[1, 2]; let other: &[T] = &[1, 2]; let result: bool = slice.lt(other); assert_eq!(result, false); } #[test] fn lt_test6() { let slice: &[T] = &[1, 2]; let other: &[T] = &[1, 2]; let result: bool = slice < other; assert_eq!(result, false); } #[test] fn lt_test7() { let slice: &[T] = &[1, 2]; let other: &[T] = &[1, 2, 3]; let result: bool = slice.lt(other); assert_eq!(result, true); } #[test] fn lt_test8() { let slice: &[T] = &[1, 2]; let other: &[T] = &[1, 2, 3]; let result: bool = slice < other; assert_eq!(result, true); } #[test] fn lt_test9() { let slice: &[T] = &[1, 2, 3]; let other: &[T] = &[1, 2]; let result: bool = slice.lt(other); assert_eq!(result, false); } #[test] fn lt_test10() { let slice: &[T] = &[1, 2, 3]; let other: &[T] = &[1, 2]; let result: bool = slice < other; assert_eq!(result, false); } }<|fim▁end|>
#![feature(core)] extern crate core; #[cfg(test)]
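The lt.rs tests above exercise lexicographic ordering of slices: the first differing element decides, and a sequence is less than any of its proper extensions. Python's built-in sequence comparison follows the same rules, so the expectations can be restated as a runnable sanity check:

# Python lists compare lexicographically, element by element, with a
# shorter sequence ordered before its extension; these mirror the Rust
# assertions above one for one.
assert [1, 2] < [2, 1, 0]          # first differing element decides
assert not [3, 2] < [2, 1, 0]
assert not [1, 2] < [1, 2]         # equal sequences are not "less"
assert [1, 2] < [1, 2, 3]          # a prefix is less than its extension
assert not [1, 2, 3] < [1, 2]
print("all lexicographic comparisons behave as in the Rust tests")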
<|file_name|>snmp-test.py<|end_file_name|><|fim▁begin|>from snmp_helper import snmp_get_oid,snmp_extract COMMUNITY_STRING = 'galileo' SNMP_PORT = 161 IP = '184.105.247.70' a_device = (IP, COMMUNITY_STRING, SNMP_PORT)<|fim▁hole|>print output<|fim▁end|>
OID = '1.3.6.1.2.1.1.1.0' snmp_data = snmp_get_oid(a_device, oid=OID) output = snmp_extract(snmp_data)
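The snippet above is Python 2 (bare print statement). A minimal Python 3 rendering of the same probe is sketched below, assuming the snmp_helper module from the original exercise is importable; the OID 1.3.6.1.2.1.1.1.0 it queries is sysDescr.0.

from snmp_helper import snmp_get_oid, snmp_extract  # assumed course helper module

COMMUNITY_STRING = 'galileo'
SNMP_PORT = 161
IP = '184.105.247.70'
OID = '1.3.6.1.2.1.1.1.0'  # sysDescr.0

a_device = (IP, COMMUNITY_STRING, SNMP_PORT)
snmp_data = snmp_get_oid(a_device, oid=OID)
print(snmp_extract(snmp_data))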
<|file_name|>section_positive_test1.java<|end_file_name|><|fim▁begin|>//Pyjama compiler version:v1.5.3 package PyjamaCode.TestingDirectives.Sections; import pj.Pyjama; import pj.pr.*; import pj.PjRuntime; import pj.Pyjama; import pi.*; import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReentrantLock; import javax.swing.SwingUtilities; import java.lang.reflect.InvocationTargetException; import pj.pr.exceptions.OmpParallelRegionLocalCancellationException; public class section_positive_test1 { /** * return 2 because nomater how many threads are allowed there are only two section * */ public int[] parallel_sections(int threadNumber) {{ Pyjama.omp_set_num_threads(threadNumber); int[] array = new int[threadNumber]; int index = 0; /*OpenMP Parallel region (#0) -- START */ InternalControlVariables icv_previous__OMP_ParallelRegion_0 = PjRuntime.getCurrentThreadICV(); InternalControlVariables icv__OMP_ParallelRegion_0 = PjRuntime.inheritICV(icv_previous__OMP_ParallelRegion_0); int _threadNum__OMP_ParallelRegion_0 = icv__OMP_ParallelRegion_0.nthreads_var.get(icv__OMP_ParallelRegion_0.levels_var); ConcurrentHashMap<String, Object> inputlist__OMP_ParallelRegion_0 = new ConcurrentHashMap<String,Object>(); ConcurrentHashMap<String, Object> outputlist__OMP_ParallelRegion_0 = new ConcurrentHashMap<String,Object>(); inputlist__OMP_ParallelRegion_0.put("array",array); inputlist__OMP_ParallelRegion_0.put("index",index); _OMP_ParallelRegion_0 _OMP_ParallelRegion_0_in = new _OMP_ParallelRegion_0(_threadNum__OMP_ParallelRegion_0,icv__OMP_ParallelRegion_0,inputlist__OMP_ParallelRegion_0,outputlist__OMP_ParallelRegion_0); _OMP_ParallelRegion_0_in.runParallelCode(); array = (int[])outputlist__OMP_ParallelRegion_0.get("array"); index = (Integer)outputlist__OMP_ParallelRegion_0.get("index"); PjRuntime.recoverParentICV(icv_previous__OMP_ParallelRegion_0); RuntimeException OMP_ee_0 = (RuntimeException) _OMP_ParallelRegion_0_in.OMP_CurrentParallelRegionExceptionSlot.get(); if (OMP_ee_0 != null) {throw OMP_ee_0;} /*OpenMP Parallel region (#0) -- END */ return array; } } class _OMP_ParallelRegion_0{ private int OMP_threadNumber = 1; private InternalControlVariables icv; private ConcurrentHashMap<String, Object> OMP_inputList = new ConcurrentHashMap<String, Object>(); private ConcurrentHashMap<String, Object> OMP_outputList = new ConcurrentHashMap<String, Object>(); private ReentrantLock OMP_lock; private ParIterator<?> OMP__ParIteratorCreator; public AtomicReference<Throwable> OMP_CurrentParallelRegionExceptionSlot = new AtomicReference<Throwable>(null); //#BEGIN shared variables defined here int[] array = null; int index = 0; //#END shared variables defined here public _OMP_ParallelRegion_0(int thread_num, InternalControlVariables icv, ConcurrentHashMap<String, Object> inputlist, ConcurrentHashMap<String, Object> outputlist) { this.icv = icv; if ((false == Pyjama.omp_get_nested()) && (Pyjama.omp_get_level() > 0)) { this.OMP_threadNumber = 1; }else { this.OMP_threadNumber = thread_num; } this.OMP_inputList = inputlist; this.OMP_outputList = outputlist; icv.currentParallelRegionThreadNumber = this.OMP_threadNumber; icv.OMP_CurrentParallelRegionBarrier = new PjCyclicBarrier(this.OMP_threadNumber); //#BEGIN shared variables initialised here array = (int[])OMP_inputList.get("array"); index = (Integer)OMP_inputList.get("index"); //#END shared variables initialised 
here } private void updateOutputListForSharedVars() { //BEGIN update outputlist OMP_outputList.put("array",array); OMP_outputList.put("index",index); //END update outputlist } class MyCallable implements Callable<ConcurrentHashMap<String,Object>> { private int alias_id; private ConcurrentHashMap<String, Object> OMP_inputList; private ConcurrentHashMap<String, Object> OMP_outputList; //#BEGIN private/firstprivate reduction variables defined here //#END private/firstprivate reduction variables defined here MyCallable(int id, ConcurrentHashMap<String,Object> inputlist, ConcurrentHashMap<String,Object> outputlist){ this.alias_id = id; this.OMP_inputList = inputlist; this.OMP_outputList = outputlist; //#BEGIN firstprivate reduction variables initialised here //#END firstprivate reduction variables initialised here } @Override public ConcurrentHashMap<String,Object> call() { try { /****User Code BEGIN***/ /*OpenMP Work Share region (#0) -- START */ {//#BEGIN firstprivate lastprivate reduction variables defined and initialized here //#set implicit barrier here, otherwise unexpected initial value happens PjRuntime.setBarrier(); //#END firstprivate lastprivate reduction variables defined and initialized here try{ int _OMP_VANCY_ITERATOR_=0; int OMP_iterator = 0; int OMP_end = (int)((2)-(0))/(1); if (((2)-(0))%(1) == 0) { OMP_end = OMP_end - 1; } if (0 == Pyjama.omp_get_thread_num()) { PjRuntime.get_OMP_loopCursor().getAndSet(0);} PjRuntime.setBarrier(); while ((OMP_iterator = PjRuntime.get_OMP_loopCursor().getAndAdd(1)) <= OMP_end) { for (int OMP_local_iterator = OMP_iterator; OMP_local_iterator<OMP_iterator+1 && OMP_local_iterator<=OMP_end; OMP_local_iterator++){ _OMP_VANCY_ITERATOR_ = 0 + OMP_local_iterator * (1); switch(_OMP_VANCY_ITERATOR_) { case 0: { index = Pyjama.omp_get_thread_num(); array[index] += 1; } break; case 1: { index = Pyjama.omp_get_thread_num(); array[index] += 1; } break; default: break; }if (OMP_end == OMP_local_iterator) { //BEGIN lastprivate variables value set //END lastprivate variables value set } } } } catch (pj.pr.exceptions.OmpWorksharingLocalCancellationException wse){ } catch (Exception e){throw e;} //BEGIN reduction PjRuntime.reductionLockForWorksharing.lock(); PjRuntime.reductionLockForWorksharing.unlock();//END reduction PjRuntime.setBarrier(); } PjRuntime.setBarrier(); PjRuntime.reset_OMP_orderCursor(); /*OpenMP Work Share region (#0) -- END */ /****User Code END***/ //BEGIN reduction procedure //END reduction procedure PjRuntime.setBarrier(); } catch (OmpParallelRegionLocalCancellationException e) { PjRuntime.decreaseBarrierCount(); } catch (Exception e) { PjRuntime.decreaseBarrierCount(); PjExecutor.cancelCurrentThreadGroup(); OMP_CurrentParallelRegionExceptionSlot.compareAndSet(null, e);<|fim▁hole|> if (0 == this.alias_id) { updateOutputListForSharedVars(); } return null; } } public void runParallelCode() { for (int i = 1; i <= this.OMP_threadNumber-1; i++) { Callable<ConcurrentHashMap<String,Object>> slaveThread = new MyCallable(i, OMP_inputList, OMP_outputList); PjRuntime.submit(i, slaveThread, icv); } Callable<ConcurrentHashMap<String,Object>> masterThread = new MyCallable(0, OMP_inputList, OMP_outputList); PjRuntime.getCurrentThreadICV().currentThreadAliasID = 0; try { masterThread.call(); } catch (Exception e) { e.printStackTrace(); } } } }<|fim▁end|>
}
<|file_name|>partsum1.cpp<|end_file_name|><|fim▁begin|>// STLport regression testsuite component. // To compile as a separate example, please #define MAIN. #include <iterator> #include <vector> #include <algorithm><|fim▁hole|>#include <iterator> #ifdef MAIN #define partsum1_test main #endif #if !defined (STLPORT) || defined(__STL_USE_NAMESPACES) using namespace std; #endif int partsum1_test(int, char**) { cout<<"Results of partsum1_test:"<<endl; vector <int> v1(10); iota(v1.begin(), v1.end(), 0); vector <int> v2(v1.size()); partial_sum(v1.begin(), v1.end(), v2.begin()); ostream_iterator <int> iter(cout, " "); copy(v1.begin(), v1.end(), iter); cout << endl; copy(v2.begin(), v2.end(), iter); cout << endl; return 0; }<|fim▁end|>
#include <iostream> #include <numeric>
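partsum1.cpp pairs iota (fill with 0, 1, 2, ...) with partial_sum (running prefix sums). For readers comparing against the STL example, the same idea in Python is a single call to itertools.accumulate:

from itertools import accumulate

v1 = list(range(10))        # iota(v1.begin(), v1.end(), 0)
v2 = list(accumulate(v1))   # partial_sum(v1.begin(), v1.end(), v2.begin())
print(*v1)                  # 0 1 2 3 4 5 6 7 8 9
print(*v2)                  # 0 1 3 6 10 15 21 28 36 45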
<|file_name|>AssertWrapper.java<|end_file_name|><|fim▁begin|>package junit.util;

import static org.hamcrest.CoreMatchers.*;
import static org.junit.Assert.*;

/**
 * Wrapper helpers that make JUnit4's assertThat assertions easier to use.<br>
 * For those who find the import needed for "is" tedious and who want to avoid boxing.<br>
 * The method names differ from assertThat in case its signature is ever improved.
 *
 * @author cobot00
 */
public final class AssertWrapper {

    private AssertWrapper() {
        // Hide the constructor
    }

    public static void assertThatWrapper(int actual, int expected) {
        assertThat(Integer.valueOf(actual), is(Integer.valueOf(expected)));
    }

    public static void assertThatWrapper(long actual, long expected) {
        assertThat(Long.valueOf(actual), is(Long.valueOf(expected)));
    }

    public static void assertThatWrapper(double actual, double expected) {
        assertThat(Double.valueOf(actual), is(Double.valueOf(expected)));
    }

    public static void assertThatWrapper(String actual, String expected) {
        assertThat(actual, is(expected));
    }

    public static void assertEqualsWrapper(boolean actual, boolean expected) {
        if (expected) {
            assertTrue("\n Expected [" + expected + "] But actual [" + actual + "]", actual);
        } else {
            assertFalse("\n Expected [" + expected + "] But actual [" + actual + "]", actual);
        }
    }

    public static void assertThatWrapper(int actual, int expected, String message) {
        assertThat(message, Integer.valueOf(actual), is(Integer.valueOf(expected)));
    }

    public static void assertThatWrapper(double actual, double expected, String message) {
        assertThat(message, Double.valueOf(actual), is(Double.valueOf(expected)));
    }

    public static void assertThatWrapper(long actual, long expected, String message) {<|fim▁hole|>
        assertThat(message, actual, is(expected));
    }
}<|fim▁end|>
assertThat(message, Long.valueOf(actual), is(Long.valueOf(expected))); } public static void assertThatWrapper(String actual, String expected, String message) {
<|file_name|>WebAssetStatementDeserializerTest.java<|end_file_name|><|fim▁begin|>/* * Copyright 2016 The OpenYOLO Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS,<|fim▁hole|> * See the License for the specific language governing permissions and * limitations under the License. */ package org.openyolo.spi.assetlinks.data; import static junit.framework.Assert.assertNotNull; import static junit.framework.Assert.assertTrue; import java.util.List; import org.json.JSONObject; import org.junit.Test; import org.junit.runner.RunWith; import org.robolectric.RobolectricTestRunner; import org.robolectric.annotation.Config; import org.valid4j.errors.RequireViolation; /** * Tests for {@link WebAssetStatementDeserializer}. */ @RunWith(RobolectricTestRunner.class) @Config(manifest = Config.NONE) public class WebAssetStatementDeserializerTest { @Test(expected = RequireViolation.class) public void testNullJson() { new WebAssetStatementDeserializer().deserialize(null); } @Test public void testNoTarget() { JSONObject json = new JSONObject(); final List<WebSiteAssetStatement> assetStatements = new WebAssetStatementDeserializer() .deserialize(json); assertNotNull(assetStatements); assertTrue(assetStatements.isEmpty()); } }<|fim▁end|>
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<|file_name|>blocklist-file.rs<|end_file_name|><|fim▁begin|>#![allow( dead_code, non_snake_case, non_camel_case_types, non_upper_case_globals )] #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct SizedIntegers { pub x: u8, pub y: u16, pub z: u32, } #[test] fn bindgen_test_layout_SizedIntegers() { assert_eq!( ::std::mem::size_of::<SizedIntegers>(), 8usize, concat!("Size of: ", stringify!(SizedIntegers)) ); assert_eq!( ::std::mem::align_of::<SizedIntegers>(), 4usize, concat!("Alignment of ", stringify!(SizedIntegers)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<SizedIntegers>())).x as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(SizedIntegers), "::", stringify!(x) ) ); assert_eq!( unsafe { &(*(::std::ptr::null::<SizedIntegers>())).y as *const _ as usize }, 2usize, concat!( "Offset of field: ", stringify!(SizedIntegers), "::", stringify!(y) ) ); assert_eq!( unsafe { &(*(::std::ptr::null::<SizedIntegers>())).z as *const _ as usize<|fim▁hole|> stringify!(SizedIntegers), "::", stringify!(z) ) ); } #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct StructWithBlocklistedFwdDecl { pub b: u8, } #[test] fn bindgen_test_layout_StructWithBlocklistedFwdDecl() { assert_eq!( ::std::mem::size_of::<StructWithBlocklistedFwdDecl>(), 1usize, concat!("Size of: ", stringify!(StructWithBlocklistedFwdDecl)) ); assert_eq!( ::std::mem::align_of::<StructWithBlocklistedFwdDecl>(), 1usize, concat!("Alignment of ", stringify!(StructWithBlocklistedFwdDecl)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<StructWithBlocklistedFwdDecl>())).b as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(StructWithBlocklistedFwdDecl), "::", stringify!(b) ) ); }<|fim▁end|>
}, 4usize, concat!( "Offset of field: ",
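blocklist-file.rs asserts the C layout (size, alignment, and field offsets) that bindgen expects for SizedIntegers. The same layout can be cross-checked from Python with ctypes; this is an illustrative check, not part of the bindgen test itself:

import ctypes

class SizedIntegers(ctypes.Structure):
    # u8, u16, u32 fields, laid out with standard C padding rules
    _fields_ = [("x", ctypes.c_uint8),
                ("y", ctypes.c_uint16),
                ("z", ctypes.c_uint32)]

assert ctypes.sizeof(SizedIntegers) == 8
assert ctypes.alignment(SizedIntegers) == 4
assert SizedIntegers.x.offset == 0
assert SizedIntegers.y.offset == 2   # u8 padded up to u16 alignment
assert SizedIntegers.z.offset == 4
print("ctypes layout matches the bindgen asserts")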
<|file_name|>interface.ts<|end_file_name|><|fim▁begin|>/** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ export interface Position { readonly offset: number; readonly line: number; readonly character: number; } export type JsonAstNode = JsonAstNumber | JsonAstString | JsonAstIdentifier | JsonAstArray | JsonAstObject | JsonAstConstantFalse | JsonAstConstantNull | JsonAstConstantTrue; export interface JsonAstNodeBase { readonly start: Position; readonly end: Position; readonly text: string; readonly comments?: (JsonAstComment | JsonAstMultilineComment)[]; } export interface JsonAstNumber extends JsonAstNodeBase { readonly kind: 'number'; readonly value: number; } export interface JsonAstString extends JsonAstNodeBase { readonly kind: 'string'; readonly value: string;<|fim▁hole|> export interface JsonAstIdentifier extends JsonAstNodeBase { readonly kind: 'identifier'; readonly value: string; } export interface JsonArray extends Array<JsonValue> {} export interface JsonAstArray extends JsonAstNodeBase { readonly kind: 'array'; readonly elements: JsonAstNode[]; readonly value: JsonArray; } export interface JsonObject { [prop: string]: JsonValue; } export interface JsonAstKeyValue extends JsonAstNodeBase { readonly kind: 'keyvalue'; readonly key: JsonAstString | JsonAstIdentifier; readonly value: JsonAstNode; } export interface JsonAstObject extends JsonAstNodeBase { readonly kind: 'object'; readonly properties: JsonAstKeyValue[]; readonly value: JsonObject; } export interface JsonAstConstantFalse extends JsonAstNodeBase { readonly kind: 'false'; readonly value: false; } export interface JsonAstConstantNull extends JsonAstNodeBase { readonly kind: 'null'; readonly value: null; } export interface JsonAstConstantTrue extends JsonAstNodeBase { readonly kind: 'true'; readonly value: true; } // Loose mode AST. export interface JsonAstMultilineComment extends JsonAstNodeBase { readonly kind: 'multicomment'; readonly content: string; } export interface JsonAstComment extends JsonAstNodeBase { readonly kind: 'comment'; readonly content: string; } export type JsonValue = JsonAstNode['value']; export function isJsonObject(value: JsonValue): value is JsonObject { return value != null && typeof value === 'object' && !Array.isArray(value); } export function isJsonArray(value: JsonValue): value is JsonArray { return Array.isArray(value); }<|fim▁end|>
}
<|file_name|>Community_Basic.js<|end_file_name|><|fim▁begin|>/*: * @plugindesc Basic plugin for manipulating important parameters. * @author RM CoreScript team * * @help * Basic plugin for manipulating important parameters. * There is no plugin command. * * Caching images improves performance but increases memory allocation. * On mobile devices, a lot of memory allocation causes the browser to crash. * Therefore, the upper limit of memory allocation is set with cacheLimit. * * If you want to regain high performance, just increase cacheLimit. * There is no need to revert to 1.4. * * @param cacheLimit * @type number * @desc The upper limit of images' cached size (MPixel) * @default 10 * * @param screenWidth * @type number * @desc The resolution of screen width * @default 816 * * @param screenHeight * @type number * @desc The resolution of screen height * @default 624 * * @param changeWindowWidthTo * @type number * @desc If set, change window width to this value * * @param changeWindowHeightTo * @type number * @desc If set, change window height to this value * * @param renderingMode * @type select * @option canvas * @option webgl * @option auto * @desc The rendering mode (canvas/webgl/auto) * @default auto * * @param alwaysDash * @type boolean * @desc The initial value whether the player always dashes (on/off) * @on ON * @off OFF * @default false * * @param textSpeed<|fim▁hole|> * @type number * @desc The text speed on "Show Text". The larger this parameter is, the slower text speed. (0: show all texts at once) * @default 1 * * @param autoSaveFileId * @type number * @desc The file number to auto save when "Transfer Player" (0: off) * @default 0 * * @param errorMessage * @type string * @desc The message when error occurred * @default Error occurred. Please ask to the creator of this game. 
* * @param showErrorDetail * @type boolean * @desc Show where the error is caused and stack trace when error * @default true * * @param enableProgressBar * @type boolean * @desc Show progress bar when it takes a long time to load resources * @default true * * @param maxRenderingFps * @type number * @desc The maximum value of rendering frame per seconds (0: unlimited) * @default 0 */ /*:ja * @plugindesc 基本的なパラメーターを設定するプラグインです。 * @author RM CoreScript team * * @help * 基本的なパラメーターを設定するプラグインです。 * このプラグインにはプラグインコマンドはありません。 * * 画像をキャッシュするとパフォーマンスは向上しますが、その分メモリ確保も増大します。 * モバイルデバイスでは、たくさんのメモリ確保はブラウザをクラッシュさせます。 * そこで、メモリ確保の上限を「画像キャッシュ上限値」で設定しています。 * * もし高いパフォーマンスを取り戻したければ、ただ画像キャッシュ上限値を増加させればよいです。 * 1.4に戻す必要はありません。 * * @param cacheLimit * @type number * @text 画像キャッシュ上限値 * @desc 画像のメモリへのキャッシュの上限値 (MPix) * @default 10 * * @param screenWidth * @type number * @text ゲーム画面の幅 * @default 816 * * @param screenHeight * @type number * @text ゲーム画面の高さ * @default 624 * * @param changeWindowWidthTo * @type number * @text ウィンドウの幅 * @desc 値が設定されなかった場合、ゲーム画面の幅と同じ * * @param changeWindowHeightTo * @type number * @text ウィンドウの高さ * @desc 値が設定されなかった場合、ゲーム画面の高さと同じ * * @param renderingMode * @type select * @option canvas * @option webgl * @option auto * @text レンダリングモード * @default auto * * @param alwaysDash * @type boolean * @text 「常時ダッシュ」の初期値 * @on ON * @off OFF * @default false * * @param textSpeed * @type number * @text 「文章の表示」のスピード * @desc 数字が大きいほど文章の表示スピードが遅くなります (0を指定した場合は一度に全文を表示します) * @default 1 * * @param autoSaveFileId * @type number * @text オートセーブ番号 * @desc 「場所移動」の際に指定したファイル番号にオートセーブします(0を指定した場合はオートセーブしません) * @default 0 * * @param errorMessage * @type string * @text エラーメッセージ * @desc エラー時にプレイヤーに向けて表示するメッセージです * @default エラーが発生しました。ゲームの作者にご連絡ください。 * * @param showErrorDetail * @type boolean * @text エラー詳細表示 * @desc ONにすると、エラー時にエラーを発生させたイベントの情報とスタックトレースを表示します * @default true * * @param enableProgressBar * @type boolean * @text ロード進捗バー有効化 * @desc ONにすると、読み込みに時間がかかっている時にロード進捗バーを表示します * @default true * * @param maxRenderingFps * @type number * @text 描画FPS上限値 * @desc 描画FPSの上限値を設定します (0を指定した場合は制限なし) * @default 0 */ (function() { 'use strict'; function isNumber(str) { return !!str && !isNaN(str); } function toNumber(str, def) { return isNumber(str) ? 
+str : def; } var parameters = PluginManager.parameters('Community_Basic'); var cacheLimit = toNumber(parameters['cacheLimit'], 10); var screenWidth = toNumber(parameters['screenWidth'], 816); var screenHeight = toNumber(parameters['screenHeight'], 624); var renderingMode = parameters['renderingMode'].toLowerCase(); var alwaysDash = (parameters['alwaysDash'] === 'true') ||(parameters['alwaysDash'] === 'on'); var textSpeed = toNumber(parameters['textSpeed'], 1); var windowWidthTo = toNumber(parameters['changeWindowWidthTo'], 0); var windowHeightTo = toNumber(parameters['changeWindowHeightTo'], 0); var maxRenderingFps = toNumber(parameters['maxRenderingFps'], 0); var autoSaveFileId = toNumber(parameters['autoSaveFileId'], 0); var errorMessage = parameters['errorMessage']; var showErrorDetail = parameters['showErrorDetail'] === 'true'; var enableProgressBar = parameters['enableProgressBar'] === 'true'; var windowWidth; var windowHeight; if(windowWidthTo){ windowWidth = windowWidthTo; }else if(screenWidth !== SceneManager._screenWidth){ windowWidth = screenWidth; } if(windowHeightTo){ windowHeight = windowHeightTo; }else if(screenHeight !== SceneManager._screenHeight){ windowHeight = screenHeight; } ImageCache.limit = cacheLimit * 1000 * 1000; SceneManager._screenWidth = screenWidth; SceneManager._screenHeight = screenHeight; SceneManager._boxWidth = screenWidth; SceneManager._boxHeight = screenHeight; SceneManager.preferableRendererType = function() { if (Utils.isOptionValid('canvas')) { return 'canvas'; } else if (Utils.isOptionValid('webgl')) { return 'webgl'; } else if (renderingMode === 'canvas') { return 'canvas'; } else if (renderingMode === 'webgl') { return 'webgl'; } else { return 'auto'; } }; var _ConfigManager_applyData = ConfigManager.applyData; ConfigManager.applyData = function(config) { _ConfigManager_applyData.apply(this, arguments); if (config['alwaysDash'] === undefined) { this.alwaysDash = alwaysDash; } }; var _Window_Message_clearFlags = Window_Message.prototype.clearFlags; Window_Message.prototype.clearFlags = function(textState) { _Window_Message_clearFlags.apply(this, arguments); this._textSpeed = textSpeed - 1; }; var _SceneManager_initNwjs = SceneManager.initNwjs; SceneManager.initNwjs = function() { _SceneManager_initNwjs.apply(this, arguments); if (Utils.isNwjs() && windowWidth && windowHeight) { var dw = windowWidth - window.innerWidth; var dh = windowHeight - window.innerHeight; window.moveBy(-dw / 2, -dh / 2); window.resizeBy(dw, dh); } }; if (maxRenderingFps) { var currentTime = Date.now(); var deltaTime = 1000 / maxRenderingFps; var accumulator = 0; var _SceneManager_renderScene = SceneManager.renderScene; SceneManager.renderScene = function() { var newTime = Date.now(); accumulator += newTime - currentTime; currentTime = newTime; if (accumulator >= deltaTime) { accumulator -= deltaTime; _SceneManager_renderScene.apply(this, arguments); } }; } DataManager.setAutoSaveFileId(autoSaveFileId); Graphics.setErrorMessage(errorMessage); Graphics.setShowErrorDetail(showErrorDetail); Graphics.setProgressEnabled(enableProgressBar); })();<|fim▁end|>
<|file_name|>test_lib_datfile.py<|end_file_name|><|fim▁begin|>import os import unittest import synapse import synapse.lib.datfile as s_datfile from synapse.tests.common import * syndir = os.path.dirname(synapse.__file__) class DatFileTest(SynTest): def test_datfile_basic(self): with s_datfile.openDatFile('synapse.tests/test.dat') as fd:<|fim▁hole|><|fim▁end|>
self.nn(fd) self.eq(fd.read(), b'woot\n')
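test_lib_datfile.py relies on synapse's openDatFile helper to load a resource shipped inside a package. A plausible minimal stand-in, assuming the 'package.path/resource' naming seen in the test and that the package is importable, is pkgutil.get_data wrapped in a BytesIO; the real s_datfile implementation may well differ.

import io
import pkgutil

def open_dat_file(name):
    # name is "package.path/resource", e.g. "synapse.tests/test.dat"
    package, _, resource = name.partition('/')
    data = pkgutil.get_data(package, resource)
    if data is None:
        raise FileNotFoundError(name)
    return io.BytesIO(data)  # file-like, usable as a context manager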
<|file_name|>Example_2_2_3b.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python from pyliferisk import MortalityTable from pyliferisk.mortalitytables import GKM95 import numpy as np mt = MortalityTable(nt=GKM95) x = 40 #age n = 20 #horizon C = 10000 #capital i = 0.03 #interest rate payments = [] for t in range(0,n): payments.append((mt.lx[x+t] - mt.lx[x+t+1]) / mt.lx[x] * C) discount_factor = [] for y in range(0,n): discount_factor.append(1 / (1 + i) ** (y + 0.5)) print('{0:5} {1:10} {2:10}'.format(' t', 'factor', 'payment'))<|fim▁hole|><|fim▁end|>
for t in range(0,n): print('{0:2} {1:10} {2:10}'.format(t, np.around(discount_factor[t], 5), np.around(payments[t], 4)))
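Example_2_2_3b.py tabulates the yearly expected death benefit payments C*(lx[x+t] - lx[x+t+1])/lx[x] and the mid-year discount factors (1+i)^-(t+0.5); their dot product is the expected present value of the term insurance. A self-contained version of that arithmetic with toy survivor counts (not the GKM95 table) is:

C, i = 10000, 0.03
lx = [1000.0, 990.0, 978.0, 964.0]   # toy survivor counts, not GKM95

epv = 0.0
for t in range(len(lx) - 1):
    deaths = (lx[t] - lx[t + 1]) / lx[0]        # prob. of death in year t
    epv += C * deaths * (1 + i) ** -(t + 0.5)   # mid-year discounting
print(round(epv, 2))                            # -> 343.36 for these toy numbers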
<|file_name|>infinite-loops.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or<|fim▁hole|>// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /* A simple way to make sure threading works. This should use all the CPU cycles an any machines that we're likely to see for a while. */ // ignore-test fn loopy(n: isize) { if n > 0 { spawn(move|| { loopy(n - 1) }); spawn(move|| { loopy(n - 1) }); } loop { } } pub fn main() { // Commenting this out, as this will hang forever otherwise. // Even after seeing the comment above, I'm not sure what the // intention of this test is. // spawn(move|| { loopy(5) }); }<|fim▁end|>
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
<|file_name|>loader.py<|end_file_name|><|fim▁begin|># -------------------------------------------------------- # Deformable Convolutional Networks # Copyright (c) 2016 by Contributors # Copyright (c) 2017 Microsoft # Licensed under The Apache-2.0 License [see LICENSE for details] # Modified by Zheng Zhang # -------------------------------------------------------- import numpy as np import mxnet as mx import random import math from mxnet.executor_manager import _split_input_slice from utils.image import tensor_vstack from segmentation.segmentation import get_segmentation_train_batch, get_segmentation_test_batch from PIL import Image from multiprocessing import Pool class TestDataLoader(mx.io.DataIter): def __init__(self, segdb, config, batch_size=1, shuffle=False): super(TestDataLoader, self).__init__() # save parameters as properties self.segdb = segdb self.batch_size = batch_size self.shuffle = shuffle self.config = config # infer properties from roidb self.size = len(self.segdb) self.index = np.arange(self.size) # decide data and label names (only for training) self.data_name = ['data'] self.label_name = None # status variable for synchronization between get_data and get_label self.cur = 0 self.data = None self.label = [] self.im_info = None # get first batch to fill in provide_data and provide_label self.reset() self.get_batch() @property def provide_data(self): return [[(k, v.shape) for k, v in zip(self.data_name, self.data[i])] for i in xrange(len(self.data))] @property def provide_label(self): return [None for i in xrange(len(self.data))] @property def provide_data_single(self): return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])] @property def provide_label_single(self): return None def reset(self): self.cur = 0 if self.shuffle: np.random.shuffle(self.index) def iter_next(self): return self.cur < self.size def next(self): if self.iter_next(): self.get_batch() self.cur += self.batch_size return mx.io.DataBatch(data=self.data, label=self.label, pad=self.getpad(), index=self.getindex(), provide_data=self.provide_data, provide_label=self.provide_label) else: raise StopIteration def getindex(self): return self.cur / self.batch_size def getpad(self): if self.cur + self.batch_size > self.size: return self.cur + self.batch_size - self.size else: return 0 def get_batch(self): cur_from = self.cur cur_to = min(cur_from + self.batch_size, self.size) segdb = [self.segdb[self.index[i]] for i in range(cur_from, cur_to)] data, label, im_info = get_segmentation_test_batch(segdb, self.config) self.data = [[mx.nd.array(data[i][name]) for name in self.data_name] for i in xrange(len(data))] self.im_info = im_info<|fim▁hole|> """ This Iter will provide seg data to Deeplab network :param sym: to infer shape :param segdb: must be preprocessed :param config: config file :param batch_size: must divide BATCH_SIZE(128) :param crop_height: the height of cropped image :param crop_width: the width of cropped image :param shuffle: bool :param ctx: list of contexts :param work_load_list: list of work load :return: DataLoader """ super(TrainDataLoader, self).__init__() # save parameters as properties self.sym = sym self.segdb = segdb self.config = config self.batch_size = batch_size if self.config.TRAIN.ENABLE_CROP: self.crop_height = crop_height self.crop_width = crop_width else: self.crop_height = None self.crop_width = None self.shuffle = shuffle self.ctx = ctx if self.ctx is None: self.ctx = [mx.cpu()] self.work_load_list = work_load_list # infer properties from segdb self.size = len(segdb) 
self.index = np.arange(self.size) # decide data and label names self.data_name = ['data'] self.label_name = ['label'] # status variable for synchronization between get_data and get_label self.cur = 0 self.batch = None self.data = None self.label = None # init multi-process pool self.pool = Pool(processes = len(self.ctx)) # get first batch to fill in provide_data and provide_label self.reset() self.get_batch_parallel() random.seed() @property def provide_data(self): return [[(k, v.shape) for k, v in zip(self.data_name, self.data[i])] for i in xrange(len(self.data))] @property def provide_label(self): return [[(k, v.shape) for k, v in zip(self.label_name, self.label[i])] for i in xrange(len(self.data))] @property def provide_data_single(self): return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])] @property def provide_label_single(self): return [(k, v.shape) for k, v in zip(self.label_name, self.label[0])] def reset(self): self.cur = 0 if self.shuffle: np.random.shuffle(self.index) def iter_next(self): return self.cur + self.batch_size <= self.size def next(self): if self.iter_next(): self.get_batch_parallel() self.cur += self.batch_size return mx.io.DataBatch(data=self.data, label=self.label, pad=self.getpad(), index=self.getindex(), provide_data=self.provide_data, provide_label=self.provide_label) else: raise StopIteration def getindex(self): return self.cur / self.batch_size def getpad(self): if self.cur + self.batch_size > self.size: return self.cur + self.batch_size - self.size else: return 0 def infer_shape(self, max_data_shape=None, max_label_shape=None): """ Return maximum data and label shape for single gpu """ if max_data_shape is None: max_data_shape = [] if max_label_shape is None: max_label_shape = [] max_shapes = dict(max_data_shape + max_label_shape) _, label_shape, _ = self.sym.infer_shape(**max_shapes) label_shape = [(self.label_name[0], label_shape)] return max_data_shape, label_shape def get_batch_parallel(self): cur_from = self.cur cur_to = min(cur_from + self.batch_size, self.size) segdb = [self.segdb[self.index[i]] for i in range(cur_from, cur_to)] # decide multi device slice work_load_list = self.work_load_list ctx = self.ctx if work_load_list is None: work_load_list = [1] * len(ctx) assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \ "Invalid settings for work load. 
" slices = _split_input_slice(self.batch_size, work_load_list) multiprocess_results = [] for idx, islice in enumerate(slices): isegdb = [segdb[i] for i in range(islice.start, islice.stop)] multiprocess_results.append(self.pool.apply_async(parfetch, (self.config, self.crop_width, self.crop_height, isegdb))) rst = [multiprocess_result.get() for multiprocess_result in multiprocess_results] all_data = [_['data'] for _ in rst] all_label = [_['label'] for _ in rst] self.data = [[mx.nd.array(data[key]) for key in self.data_name] for data in all_data] self.label = [[mx.nd.array(label[key]) for key in self.label_name] for label in all_label] def parfetch(config, crop_width, crop_height, isegdb): # get testing data for multigpu data, label = get_segmentation_train_batch(isegdb, config) if config.TRAIN.ENABLE_CROP: data_internal = data['data'] label_internal = label['label'] sx = math.floor(random.random() * (data_internal.shape[3] - crop_width + 1)) sy = math.floor(random.random() * (data_internal.shape[2] - crop_height + 1)) sx = (int)(sx) sy = (int)(sy) assert(sx >= 0 and sx < data_internal.shape[3] - crop_width + 1) assert(sy >= 0 and sy < data_internal.shape[2] - crop_height + 1) ex = (int)(sx + crop_width - 1) ey = (int)(sy + crop_height - 1) data_internal = data_internal[:, :, sy : ey + 1, sx : ex + 1] label_internal = label_internal[:, :, sy : ey + 1, sx : ex + 1] data['data'] = data_internal label['label'] = label_internal assert (data['data'].shape[2] == crop_height) and (data['data'].shape[3] == crop_width) assert (label['label'].shape[2] == crop_height) and (label['label'].shape[3] == crop_width) return {'data': data, 'label': label}<|fim▁end|>
class TrainDataLoader(mx.io.DataIter): def __init__(self, sym, segdb, config, batch_size=1, crop_height = 768, crop_width = 1024, shuffle=False, ctx=None, work_load_list=None):
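Both loaders above share the same cursor bookkeeping: advance cur by batch_size, stop once the data is exhausted, and report padding for a final short batch via getpad(). A minimal sketch of that protocol without MXNet, using hypothetical names, is:

class SimpleLoader:
    """Cursor/batch bookkeeping as in TestDataLoader, minus MXNet."""
    def __init__(self, records, batch_size):
        self.records, self.batch_size, self.cur = records, batch_size, 0

    def __iter__(self):
        self.cur = 0          # like reset()
        return self

    def __next__(self):
        if self.cur >= len(self.records):
            raise StopIteration
        batch = self.records[self.cur:self.cur + self.batch_size]
        # like getpad(): how many slots of the last batch are padding
        pad = max(0, self.cur + self.batch_size - len(self.records))
        self.cur += self.batch_size
        return batch, pad

for batch, pad in SimpleLoader(list(range(5)), 2):
    print(batch, pad)   # last batch reports pad=1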
<|file_name|>lmgtfy_bot.py<|end_file_name|><|fim▁begin|>from urllib.parse import quote_plus import praw QUESTIONS = ['what is', 'who is', 'what are'] REPLY_TEMPLATE = '[Let me google that for you](http://lmgtfy.com/?q={})' def main(): reddit = praw.Reddit(user_agent='LMGTFY (by /u/USERNAME)', client_id='CLIENT_ID', client_secret="CLIENT_SECRET", username='USERNAME', password='PASSWORD') subreddit = reddit.subreddit('AskReddit') for submission in subreddit.stream.submissions(): process_submission(submission) def process_submission(submission): # Ignore titles with more than 10 words as they probably are not simple<|fim▁hole|> if len(submission.title.split()) > 10: return normalized_title = submission.title.lower() for question_phrase in QUESTIONS: if question_phrase in normalized_title: url_title = quote_plus(submission.title) reply_text = REPLY_TEMPLATE.format(url_title) print('Replying to: {}'.format(submission.title)) submission.reply(reply_text) # A reply has been made so do not attempt to match other phrases. break if __name__ == '__main__': main()<|fim▁end|>
# questions.
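lmgtfy_bot's process_submission boils down to a pure function: skip titles longer than ten words, match a question phrase case-insensitively, and URL-encode the title into the reply template. That logic can be exercised without Reddit credentials; build_reply below is a hypothetical extraction, not part of the bot itself.

from urllib.parse import quote_plus

QUESTIONS = ['what is', 'who is', 'what are']
REPLY_TEMPLATE = '[Let me google that for you](http://lmgtfy.com/?q={})'

def build_reply(title):
    if len(title.split()) > 10:
        return None                        # probably not a simple question
    normalized = title.lower()
    if any(q in normalized for q in QUESTIONS):
        return REPLY_TEMPLATE.format(quote_plus(title))
    return None

print(build_reply("What is flair?"))
# [Let me google that for you](http://lmgtfy.com/?q=What+is+flair%3F)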
<|file_name|>reserved-prefixes-migration.rs<|end_file_name|><|fim▁begin|>// check-pass // run-rustfix // compile-flags: -Z unstable-options --edition 2018 #![warn(rust_2021_prefixes_incompatible_syntax)] macro_rules! m2 { ($a:tt $b:tt) => {}; } macro_rules! m3 { ($a:tt $b:tt $c:tt) => {}; } <|fim▁hole|> m2!(prefix"hey"); //~^ WARNING prefix `prefix` is unknown [rust_2021_prefixes_incompatible_syntax] //~| WARNING hard error in Rust 2021 m3!(hey#123); //~^ WARNING prefix `hey` is unknown [rust_2021_prefixes_incompatible_syntax] //~| WARNING hard error in Rust 2021 m3!(hey#hey); //~^ WARNING prefix `hey` is unknown [rust_2021_prefixes_incompatible_syntax] //~| WARNING hard error in Rust 2021 } macro_rules! quote { (# name = # kind # value) => {}; } quote! { #name = #kind#value //~^ WARNING prefix `kind` is unknown [rust_2021_prefixes_incompatible_syntax] //~| WARNING hard error in Rust 2021 }<|fim▁end|>
fn main() { m2!(z"hey"); //~^ WARNING prefix `z` is unknown [rust_2021_prefixes_incompatible_syntax] //~| WARNING hard error in Rust 2021
<|file_name|>top-panel-wrapper.component.ts<|end_file_name|><|fim▁begin|>/* * Lumeer: Modern Data Definition and Processing Platform * * Copyright (C) since 2017 Lumeer.io, s.r.o. and/or its affiliates. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ import {ChangeDetectionStrategy, Component, ElementRef, HostListener, Input, OnInit} from '@angular/core'; import {BehaviorSubject} from 'rxjs'; @Component({ selector: 'top-panel-wrapper', templateUrl: './top-panel-wrapper.component.html', styleUrls: ['./top-panel-wrapper.component.scss'], changeDetection: ChangeDetectionStrategy.OnPush, }) export class TopPanelWrapperComponent implements OnInit { @Input() public searchBoxShown: boolean; public mobile$ = new BehaviorSubject(true); constructor(private element: ElementRef) {}<|fim▁hole|> public ngOnInit() { this.detectMobileResolution(); } @HostListener('window:resize') public onWindowResize() { this.detectMobileResolution(); } private detectMobileResolution() { this.mobile$.next(window.matchMedia('(max-width: 767.98px)').matches); } }<|fim▁end|>
<|file_name|>exceptions.py<|end_file_name|><|fim▁begin|>class InvalidValueState(ValueError):<|fim▁hole|><|fim▁end|>
pass
<|file_name|>QueryDAO.java<|end_file_name|><|fim▁begin|>package dao.inf; // Generated 27/11/2014 02:39:51 AM by Hibernate Tools 3.4.0.CR1 import java.util.List; import javax.naming.InitialContext; import model.Query; import model.QueryId; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.hibernate.LockMode; import org.hibernate.SessionFactory; import org.hibernate.criterion.Example; /** * Home object for domain model class Query. <|fim▁hole|> * @author Hibernate Tools */ public interface QueryDAO { public boolean save(Query query); public Integer lastId(); }<|fim▁end|>
* @see .Query
<|file_name|>vec-to_str.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #[feature(managed_boxes)];<|fim▁hole|> assert_eq!((&[1, 2]).to_str(), ~"[1, 2]"); assert_eq!((@[2, 3]).to_str(), ~"[2, 3]"); let foo = ~[3, 4]; let bar = &[4, 5]; let baz = @[5, 6]; assert_eq!(foo.to_str(), ~"[3, 4]"); assert_eq!(bar.to_str(), ~"[4, 5]"); assert_eq!(baz.to_str(), ~"[5, 6]"); }<|fim▁end|>
pub fn main() { assert_eq!((~[0, 1]).to_str(), ~"[0, 1]");
<|file_name|>purchase_order.py<|end_file_name|><|fim▁begin|><|fim▁hole|># Copyright 2015 AvanzOsc (http://www.avanzosc.es) # Copyright 2015-2016 - Pedro M. Baeza <[email protected]> # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl) from odoo import api, models class PurchaseOrder(models.Model): _inherit = 'purchase.order' @api.model def search(self, args, offset=0, limit=None, order=None, count=False): make_po_conditions = { 'partner_id', 'state', 'picking_type_id', 'company_id', 'dest_address_id', } # Restrict the empty return for these conditions if (self.env.context and self.env.context.get('grouping', 'standard') == 'order' and make_po_conditions.issubset(set(x[0] for x in args))): return self.browse() return super(PurchaseOrder, self).search( args, offset=offset, limit=limit, order=order, count=count) class PurchaseOrderLine(models.Model): _inherit = 'purchase.order.line' @api.model def search(self, args, offset=0, limit=None, order=None, count=False): # Restrict the empty return for these conditions if (self.env.context and self.env.context.get('grouping', 'standard') == 'line' and len(args) == 1 and args[0][0] == 'order_id' and args[0][1] == 'in'): return self.browse() return super(PurchaseOrderLine, self).search( args, offset=offset, limit=limit, order=order, count=count)<|fim▁end|>
# -*- coding: utf-8 -*-
<|file_name|>0001_initial.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import unicode_literals <|fim▁hole|> class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='Alloy', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=30, verbose_name=b"Alloy's name")), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='Interpolation', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('a0', models.CharField(max_length=200, verbose_name=b'Lattice parameter')), ('ac', models.CharField(max_length=200, verbose_name=b"Conductions's hydrostatic deformation potential")), ('av', models.CharField(max_length=200, verbose_name=b"Valence's hydrostatic deformation potential")), ('b', models.CharField(max_length=200, verbose_name=b'Deformation potential for tetragonal distorion')), ('c11', models.CharField(max_length=200, verbose_name=b'Elastic constant')), ('c12', models.CharField(max_length=200, verbose_name=b'Elastic constant')), ('me', models.CharField(max_length=200, verbose_name=b'Electron effective mass')), ('mhh', models.CharField(max_length=200, null=True, verbose_name=b'Heavy-hole effective mass')), ('mlh', models.CharField(max_length=200, null=True, verbose_name=b'Light-hole effective mass')), ('eg2', models.CharField(max_length=200, verbose_name=b'Gap energy at 2 K')), ('eg77', models.CharField(max_length=200, verbose_name=b'Gap energy at 77 K')), ('eg300', models.CharField(max_length=200, verbose_name=b'Gap energy at 300 K')), ('alloy', models.ForeignKey(verbose_name=b'The related alloy', to='muraki.Alloy')), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='Parameter', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('a0', models.FloatField(verbose_name=b'Lattice parameter')), ('ac', models.FloatField(verbose_name=b"Conductions's hydrostatic deformation potential")), ('av', models.FloatField(verbose_name=b"Valence's hydrostatic deformation potential")), ('b', models.FloatField(verbose_name=b'Deformation potential for tetragonal distorion')), ('c11', models.FloatField(verbose_name=b'Elastic constant')), ('c12', models.FloatField(verbose_name=b'Elastic constant')), ('me', models.FloatField(verbose_name=b'Electron effective mass')), ('mhh', models.FloatField(null=True, verbose_name=b'Heavy-hole effective mass')), ('mlh', models.FloatField(null=True, verbose_name=b'Light-hole effective mass')), ('eg2', models.FloatField(verbose_name=b'Gap energy at 2 K')), ('eg77', models.FloatField(verbose_name=b'Gap energy at 77 K')), ('eg300', models.FloatField(verbose_name=b'Gap energy at 300 K')), ('alloy', models.ForeignKey(verbose_name=b'The related alloy', to='muraki.Alloy')), ], options={ }, bases=(models.Model,), ), ]<|fim▁end|>
from django.db import models, migrations
<|file_name|>e3_2012_7_calendar.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 ''' @author Michele Tomaiuolo - http://www.ce.unipr.it/people/tomamic @license This software is free - http://www.gnu.org/licenses/gpl.html ''' WEEK_DAYS = 7 MAX_WEEKS = 6<|fim▁hole|>first = int(input("first (0-6)? ")) length = int(input("length (28-31)? ")) for i in range (WEEK_DAYS * MAX_WEEKS): day = i + 1 - first if 0 < day <= length: print(f"{:3}".format(day), end="") else: print(" ", end="") if i % WEEK_DAYS == WEEK_DAYS - 1: print() print() for y in range(MAX_WEEKS): for x in range(WEEK_DAYS): day = y * WEEK_DAYS + x + 1 - first if 0 < day <= length: print("{:3}".format(day), end='') else: print(" ", end='') print() print() for y in range(WEEK_DAYS): for x in range(MAX_WEEKS): day = y + x * WEEK_DAYS + 1 - first if 0 < day <= length: print("{:3}".format(day), end='') else: print(" ", end='') print() print()<|fim▁end|>
<|file_name|>backgroundClip.js<|end_file_name|><|fim▁begin|>"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.default = _default; function _default() { return function ({ addUtilities, variants }) { addUtilities({ '.bg-clip-border': { 'background-clip': 'border-box' }, '.bg-clip-padding': { 'background-clip': 'padding-box' }, '.bg-clip-content': { 'background-clip': 'content-box' }, '.bg-clip-text': {<|fim▁hole|> } }, variants('backgroundClip')); }; }<|fim▁end|>
'background-clip': 'text'
<|file_name|>WeatherActivity.java<|end_file_name|><|fim▁begin|>package com.ty.activity;

import com.ty.app.R;
import com.ty.service.AutoUpdateService;
import com.ty.util.HttpCallbackListener;
import com.ty.util.HttpUtil;
import com.ty.util.Utility;

import android.app.Activity;
import android.content.Intent;
import android.content.SharedPreferences;
import android.os.Bundle;
import android.preference.PreferenceManager;
import android.text.TextUtils;
import android.view.View;
import android.view.Window;
import android.view.View.OnClickListener;
import android.widget.Button;
import android.widget.LinearLayout;
import android.widget.TextView;

public class WeatherActivity extends Activity implements OnClickListener{

    private LinearLayout weatherInfoLayout;
    /**
     * Shows the city name
     */
    private TextView cityNameText;
    /**
     * Shows the publish time
     */
    private TextView publishText;
    /**
     * Shows the weather description
     */
    private TextView weatherDespText;
    /**
     * Shows temperature 1
     */
    private TextView temp1Text;
    /**
     * Shows temperature 2
     */
    private TextView temp2Text;
    /**
     * Shows the current date
     */
    private TextView currentDateText;
    /**
     * Switch-city button
     */
    private Button switchCity;
    /**<|fim▁hole|>
    private Button refreshWeather;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        requestWindowFeature(Window.FEATURE_NO_TITLE);
        setContentView(R.layout.weather_layout);
        // Initialize the widgets
        weatherInfoLayout = (LinearLayout) findViewById(R.id.tyweather_info_layout);
        cityNameText = (TextView) findViewById(R.id.tycity_name);
        publishText = (TextView) findViewById(R.id.typublish_text);
        weatherDespText = (TextView) findViewById(R.id.tyweather_desp);
        temp1Text = (TextView) findViewById(R.id.tytemp1);
        temp2Text = (TextView) findViewById(R.id.tytemp2);
        currentDateText = (TextView) findViewById(R.id.tycurrent_date);
        switchCity = (Button) findViewById(R.id.tyswitch_city);
        refreshWeather = (Button) findViewById(R.id.tyrefresh_weather);
        String countyCode = getIntent().getStringExtra("county_code");
        if (!TextUtils.isEmpty(countyCode)) {
            // A county code is present, so query the weather for it
            publishText.setText("同步中...");
            weatherInfoLayout.setVisibility(View.INVISIBLE);
            cityNameText.setVisibility(View.INVISIBLE);
            queryWeatherCode(countyCode);
        } else {
            // No county code, so show the locally cached weather directly
            showWeather();
        }
        switchCity.setOnClickListener(this);
        refreshWeather.setOnClickListener(this);
    }

    @Override
    public void onClick(View v) {
        switch (v.getId()) {
        case R.id.tyswitch_city:
            Intent intent = new Intent(this, ChooseAreaActivity.class);
            intent.putExtra("from_weather_activity", true);
            startActivity(intent);
            finish();
            break;
        case R.id.tyrefresh_weather:
            publishText.setText("同步中...");
            SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(this);
            String weatherCode = prefs.getString("weather_code", "");
            if (!TextUtils.isEmpty(weatherCode)) {
                queryWeatherInfo(weatherCode);
            }
            break;
        default:
            break;
        }
    }

    /**
     * Query the weather code that corresponds to the given county code.
     */
    private void queryWeatherCode(String countyCode) {
        String address = "http://www.weather.com.cn/data/list3/city" + countyCode + ".xml";
        queryFromServer(address, "countyCode");
    }

    /**
     * Query the weather that corresponds to the given weather code.
     */
    private void queryWeatherInfo(String weatherCode) {
        String address = "http://www.weather.com.cn/data/cityinfo/" + weatherCode + ".html";
        queryFromServer(address, "weatherCode");
    }

    /**
     * Query the server for a weather code or for weather information, depending on the address and type passed in.
     */
    private void queryFromServer(final String address, final String type) {
        HttpUtil.sendHttpRequest(address, new HttpCallbackListener() {
            @Override
            public void onFinish(final String response) {
                if ("countyCode".equals(type)) {
                    if (!TextUtils.isEmpty(response)) {
                        // Parse the weather code out of the server response
                        String[] array = response.split("\\|");
                        if (array != null && array.length == 2) {
                            String weatherCode = array[1];
                            queryWeatherInfo(weatherCode);
                        }
                    }
                } else if ("weatherCode".equals(type)) {
                    // Handle the weather information returned by the server
                    Utility.handleWeatherResponse(WeatherActivity.this, response);
                    runOnUiThread(new Runnable() {
                        @Override
                        public void run() {
                            showWeather();
                        }
                    });
                }
            }

            @Override
            public void onError(Exception e) {
                runOnUiThread(new Runnable() {
                    @Override
                    public void run() {
                        publishText.setText("同步失败");
                    }
                });
            }
        });
    }

    /**
     * Read the stored weather information from SharedPreferences and show it in the UI.
     */
    private void showWeather() {
        SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(this);
        cityNameText.setText( prefs.getString("city_name", ""));
        temp1Text.setText(prefs.getString("temp1", ""));
        temp2Text.setText(prefs.getString("temp2", ""));
        weatherDespText.setText(prefs.getString("weather_desp", ""));
        publishText.setText("今天" + prefs.getString("publish_time", "") + "发布");
        currentDateText.setText(prefs.getString("current_date", ""));
        weatherInfoLayout.setVisibility(View.VISIBLE);
        cityNameText.setVisibility(View.VISIBLE);
        Intent intent = new Intent(this, AutoUpdateService.class);
        startService(intent);
    }
}<|fim▁end|>
* Button for refreshing the weather */
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># # Ophidia WPS Module # Copyright (C) 2015-2021 CMCC Foundation # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by<|fim▁hole|># but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. #<|fim▁end|>
# the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful,
<|file_name|>GenesisShaderBison.cpp<|end_file_name|><|fim▁begin|>/* A Bison parser, made by GNU Bison 2.4.1. */ /* Skeleton implementation for Bison's Yacc-like parsers in C Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* As a special exception, you may create a larger work that contains part or all of the Bison parser skeleton and distribute that work under terms of your choice, so long as that work isn't itself a parser generator using the skeleton or a modified version thereof as a parser skeleton. Alternatively, if you modify or redistribute the parser skeleton itself, you may (at your option) remove this special exception, which will cause the skeleton and the resulting Bison output files to be licensed under the GNU General Public License without this special exception. This special exception was added by the Free Software Foundation in version 2.2 of Bison. */ /* C LALR(1) parser skeleton written by Richard Stallman, by simplifying the original so-called "semantic" parser. */ /* All symbols defined below should begin with yy or YY, to avoid infringing on user name space. This should be done even for local variables, as they might otherwise be expanded by user macros. There are some unavoidable exceptions within include files to define necessary library symbols; they are noted "INFRINGES ON USER NAME SPACE" below. */ /* Identify Bison output. */ #define YYBISON 1 /* Bison version. */ #define YYBISON_VERSION "2.4.1" /* Skeleton name. */ #define YYSKELETON_NAME "yacc.c" /* Pure parsers. */ #define YYPURE 0 /* Push parsers. */ #define YYPUSH 0 /* Pull parsers. */ #define YYPULL 1 /* Using locations. */ #define YYLSP_NEEDED 0 /* Substitute the variable and function names. */ #define yyparse Genesisparse #define yylex Genesislex #define yyerror Genesiserror #define yylval Genesislval #define yychar Genesischar #define yydebug Genesisdebug #define yynerrs Genesisnerrs /* Copy the first part of user declarations. */ #include "stdneb.h" #include "rendersystem/base/RenderDeviceTypes.h" #include "GenesisShaderParser.h" #include "../GenesisMaterial.h" #include "addons/shadercompiler/Utility/ShaderCompilerUtil.h" void ResetParserParams(); int yyerror (const char *s); extern int Genesislineno; extern char* yytext; int yylex (); using namespace GenesisMaterialMaker; using namespace ShaderProgramCompiler; GenesisMaterial* g_GenesisMaterial; static GenesisMakePass* g_curMakePass = NULL; static GenesisMakeTechnique* g_curGenesisMakeTechnique = NULL; static GenesisMakeMaterial* g_curGenesisMakeMaterial = NULL; static GenesisMakeGPUProgram* g_curGenesisMakeGPUProgram = NULL; static Graphic::ShaderParam* g_curShaderParameter = NULL; static Graphic::MaterialParam* g_curMatParam = NULL; static GPtr<RenderBase::RenderStateDesc> g_rsDesc = 0; #define ASSIGN(s,d) {s = *d; delete d;} #define YYDEBUG 1 /* Enabling traces. 
*/ #ifndef YYDEBUG # define YYDEBUG 0 #endif /* Enabling verbose error messages. */ #ifdef YYERROR_VERBOSE # undef YYERROR_VERBOSE # define YYERROR_VERBOSE 1 #else # define YYERROR_VERBOSE 0 #endif /* Enabling the token table. */ #ifndef YYTOKEN_TABLE # define YYTOKEN_TABLE 0 #endif /* Tokens. */ #ifndef YYTOKENTYPE # define YYTOKENTYPE /* Put the tokens into the symbol table, so that GDB and other debuggers know about them. */ enum yytokentype { TSHADER = 258, TTECHNIQUE = 259, TPASS = 260, TSETSHADERCODE = 261, TPARAMETERS = 262, TRENDERQUEUE = 263, TSHADERTYPE = 264, TRENDERDEVICETYPE = 265, TSETPARAM = 266, TRENDERSTATE = 267, TCULLMODE = 268, TFILLMODE = 269, TCOLORMASK = 270, TDEPTHTEST = 271, TDEPTHWRITE = 272, TBLENDCOLOR = 273, TALPHATEST = 274, TSAMPLER = 275, TMATTYPE = 276, TMATRIX = 277, TVECTOR = 278, TFLOAT = 279, TTEXTURE = 280, TREALSTRING = 281, TVAR = 282, TOPERATOR = 283, TNUMBER = 284, TBOOLEAN = 285 }; #endif /* Tokens. */ #define TSHADER 258 #define TTECHNIQUE 259 #define TPASS 260 #define TSETSHADERCODE 261 #define TPARAMETERS 262 #define TRENDERQUEUE 263 #define TSHADERTYPE 264 #define TRENDERDEVICETYPE 265 #define TSETPARAM 266 #define TRENDERSTATE 267 #define TCULLMODE 268 #define TFILLMODE 269 #define TCOLORMASK 270 #define TDEPTHTEST 271 #define TDEPTHWRITE 272 #define TBLENDCOLOR 273 #define TALPHATEST 274 #define TSAMPLER 275 #define TMATTYPE 276 #define TMATRIX 277 #define TVECTOR 278 #define TFLOAT 279 #define TTEXTURE 280 #define TREALSTRING 281 #define TVAR 282 #define TOPERATOR 283 #define TNUMBER 284 #define TBOOLEAN 285 #if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED typedef union YYSTYPE { char* str; float num; Graphic::ShaderParamType spt; bool boolean; } YYSTYPE; # define YYSTYPE_IS_TRIVIAL 1 # define yystype YYSTYPE /* obsolescent; will be withdrawn */ # define YYSTYPE_IS_DECLARED 1 #endif /* Copy the second part of user declarations. */ #ifdef short # undef short #endif #ifdef YYTYPE_UINT8 typedef YYTYPE_UINT8 yytype_uint8; #else typedef unsigned char yytype_uint8; #endif #ifdef YYTYPE_INT8 typedef YYTYPE_INT8 yytype_int8; #elif (defined __STDC__ || defined __C99__FUNC__ \ || defined __cplusplus || defined _MSC_VER) typedef signed char yytype_int8; #else typedef short int yytype_int8; #endif #ifdef YYTYPE_UINT16 typedef YYTYPE_UINT16 yytype_uint16; #else typedef unsigned short int yytype_uint16; #endif #ifdef YYTYPE_INT16 typedef YYTYPE_INT16 yytype_int16; #else typedef short int yytype_int16; #endif #ifndef YYSIZE_T # ifdef __SIZE_TYPE__ # define YYSIZE_T __SIZE_TYPE__ # elif defined size_t # define YYSIZE_T size_t # elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \ || defined __cplusplus || defined _MSC_VER) # include <stddef.h> /* INFRINGES ON USER NAME SPACE */ # define YYSIZE_T size_t # else # define YYSIZE_T unsigned int # endif #endif #define YYSIZE_MAXIMUM ((YYSIZE_T) -1) #ifndef YY_ # if YYENABLE_NLS # if ENABLE_NLS # include <libintl.h> /* INFRINGES ON USER NAME SPACE */ # define YY_(msgid) dgettext ("bison-runtime", msgid) # endif # endif # ifndef YY_ # define YY_(msgid) msgid # endif #endif /* Suppress unused-variable warnings by "using" E. */ #if ! defined lint || defined __GNUC__ # define YYUSE(e) ((void) (e)) #else # define YYUSE(e) /* empty */ #endif /* Identity function, used to suppress warnings about constant conditions. 
*/ #ifndef lint # define YYID(n) (n) #else #if (defined __STDC__ || defined __C99__FUNC__ \ || defined __cplusplus || defined _MSC_VER) static int YYID (int yyi) #else static int YYID (yyi) int yyi; #endif { return yyi; } #endif #if ! defined yyoverflow || YYERROR_VERBOSE /* The parser invokes alloca or malloc; define the necessary symbols. */ # ifdef YYSTACK_USE_ALLOCA # if YYSTACK_USE_ALLOCA # ifdef __GNUC__ # define YYSTACK_ALLOC __builtin_alloca # elif defined __BUILTIN_VA_ARG_INCR # include <alloca.h> /* INFRINGES ON USER NAME SPACE */ # elif defined _AIX # define YYSTACK_ALLOC __alloca # elif defined _MSC_VER # include <malloc.h> /* INFRINGES ON USER NAME SPACE */ # define alloca _alloca # else # define YYSTACK_ALLOC alloca # if ! defined _ALLOCA_H && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \ || defined __cplusplus || defined _MSC_VER) # include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ # ifndef _STDLIB_H # define _STDLIB_H 1 # endif # endif # endif # endif # endif # ifdef YYSTACK_ALLOC /* Pacify GCC's `empty if-body' warning. */ # define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0)) # ifndef YYSTACK_ALLOC_MAXIMUM /* The OS might guarantee only one guard page at the bottom of the stack, and a page size can be as small as 4096 bytes. So we cannot safely invoke alloca (N) if N exceeds 4096. Use a slightly smaller number to allow for a few compiler-allocated temporary stack slots. */ # define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */ # endif # else # define YYSTACK_ALLOC YYMALLOC # define YYSTACK_FREE YYFREE # ifndef YYSTACK_ALLOC_MAXIMUM # define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM # endif # if (defined __cplusplus && ! defined _STDLIB_H \ && ! ((defined YYMALLOC || defined malloc) \ && (defined YYFREE || defined free))) # include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ # ifndef _STDLIB_H # define _STDLIB_H 1 # endif # endif # ifndef YYMALLOC # define YYMALLOC malloc # if ! defined malloc && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \ || defined __cplusplus || defined _MSC_VER) void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */ # endif # endif # ifndef YYFREE # define YYFREE free # if ! defined free && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \ || defined __cplusplus || defined _MSC_VER) void free (void *); /* INFRINGES ON USER NAME SPACE */ # endif # endif # endif #endif /* ! defined yyoverflow || YYERROR_VERBOSE */ #if (! defined yyoverflow \ && (! defined __cplusplus \ || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL))) /* A type that is properly aligned for any stack member. */ union yyalloc { yytype_int16 yyss_alloc; YYSTYPE yyvs_alloc; }; /* The size of the maximum gap between one aligned stack and the next. */ # define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1) /* The size of an array large to enough to hold all stacks, each with N elements. */ # define YYSTACK_BYTES(N) \ ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \ + YYSTACK_GAP_MAXIMUM) /* Copy COUNT objects from FROM to TO. The source and destination do not overlap. */ # ifndef YYCOPY # if defined __GNUC__ && 1 < __GNUC__ # define YYCOPY(To, From, Count) \ __builtin_memcpy (To, From, (Count) * sizeof (*(From))) # else # define YYCOPY(To, From, Count) \ do \ { \ YYSIZE_T yyi; \ for (yyi = 0; yyi < (Count); yyi++) \ (To)[yyi] = (From)[yyi]; \ } \ while (YYID (0)) # endif # endif /* Relocate STACK from its old location to the new one. 
The local variables YYSIZE and YYSTACKSIZE give the old and new number of elements in the stack, and YYPTR gives the new location of the stack. Advance YYPTR to a properly aligned location for the next stack. */ # define YYSTACK_RELOCATE(Stack_alloc, Stack) \ do \ { \ YYSIZE_T yynewbytes; \ YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \ Stack = &yyptr->Stack_alloc; \ yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \ yyptr += yynewbytes / sizeof (*yyptr); \ } \ while (YYID (0)) #endif /* YYFINAL -- State number of the termination state. */ #define YYFINAL 4 /* YYLAST -- Last index in YYTABLE. */ #define YYLAST 100 /* YYNTOKENS -- Number of terminals. */ #define YYNTOKENS 34 /* YYNNTS -- Number of nonterminals. */ #define YYNNTS 36 /* YYNRULES -- Number of rules. */ #define YYNRULES 73 /* YYNRULES -- Number of states. */ #define YYNSTATES 130 /* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */ #define YYUNDEFTOK 2 #define YYMAXUTOK 285 #define YYTRANSLATE(YYX) \ ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) /* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */ static const yytype_uint8 yytranslate[] = { 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 33, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 31, 2, 32, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 }; #if YYDEBUG /* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in YYRHS. */ static const yytype_uint8 yyprhs[] = { 0, 0, 3, 4, 5, 6, 15, 16, 17, 18, 26, 30, 31, 37, 38, 45, 46, 52, 59, 66, 72, 79, 85, 92, 98, 99, 100, 104, 105, 109, 110, 111, 112, 122, 123, 126, 127, 130, 133, 134, 135, 136, 144, 145, 149, 153, 157, 160, 164, 167, 170, 173, 177, 182, 185, 188, 193, 196, 200, 203, 204, 205, 206, 215, 216, 217, 218, 219, 229, 230, 234, 240, 246, 252 }; /* YYRHS -- A `-1'-separated list of the rules' RHS. 
*/ static const yytype_int8 yyrhs[] = { 35, 0, -1, -1, -1, -1, 3, 26, 36, 31, 37, 39, 38, 32, -1, -1, -1, -1, 39, 7, 31, 40, 44, 41, 32, -1, 39, 8, 26, -1, -1, 39, 4, 31, 42, 45, -1, -1, 39, 4, 26, 31, 43, 45, -1, -1, 44, 25, 27, 33, 26, -1, 44, 25, 27, 28, 33, 26, -1, 44, 22, 27, 28, 33, 26, -1, 44, 22, 27, 33, 26, -1, 44, 23, 27, 28, 33, 26, -1, 44, 23, 27, 33, 26, -1, 44, 24, 27, 28, 33, 26, -1, 44, 24, 27, 33, 26, -1, -1, -1, 47, 46, 32, -1, -1, 47, 21, 26, -1, -1, -1, -1, 47, 5, 51, 48, 31, 49, 52, 50, 32, -1, -1, 51, 26, -1, -1, 52, 53, -1, 52, 61, -1, -1, -1, -1, 53, 12, 31, 54, 56, 55, 32, -1, -1, 56, 13, 26, -1, 56, 14, 26, -1, 56, 15, 26, -1, 56, 58, -1, 56, 17, 30, -1, 56, 59, -1, 56, 60, -1, 56, 57, -1, 20, 26, 27, -1, 20, 26, 27, 27, -1, 16, 27, -1, 16, 30, -1, 18, 27, 26, 26, -1, 18, 30, -1, 19, 27, 26, -1, 19, 30, -1, -1, -1, -1, 9, 26, 62, 31, 63, 65, 64, 32, -1, -1, -1, -1, -1, 65, 10, 26, 66, 31, 67, 69, 68, 32, -1, -1, 69, 6, 26, -1, 69, 11, 29, 27, 25, -1, 69, 11, 29, 27, 22, -1, 69, 11, 29, 27, 23, -1, 69, 11, 29, 27, 24, -1 }; /* YYRLINE[YYN] -- source line where rule number YYN was defined. */ static const yytype_uint16 yyrline[] = { 0, 68, 68, 74, 76, 68, 83, 85, 87, 85, 91, 94, 94, 99, 99, 105, 107, 151, 195, 203, 211, 219, 227, 235, 244, 246, 246, 254, 256, 259, 261, 264, 259, 273, 278, 284, 286, 288, 291, 293, 298, 293, 305, 307, 312, 317, 322, 324, 329, 331, 333, 336, 339, 343, 350, 357, 366, 373, 390, 397, 403, 405, 397, 416, 417, 422, 424, 417, 430, 432, 437, 446, 455, 464 }; #endif #if YYDEBUG || YYERROR_VERBOSE || YYTOKEN_TABLE /* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. First, the terminals, then, starting at YYNTOKENS, nonterminals. */ static const char *const yytname[] = { "$end", "error", "$undefined", "TSHADER", "TTECHNIQUE", "TPASS", "TSETSHADERCODE", "TPARAMETERS", "TRENDERQUEUE", "TSHADERTYPE", "TRENDERDEVICETYPE", "TSETPARAM", "TRENDERSTATE", "TCULLMODE", "TFILLMODE", "TCOLORMASK", "TDEPTHTEST", "TDEPTHWRITE", "TBLENDCOLOR", "TALPHATEST", "TSAMPLER", "TMATTYPE", "TMATRIX", "TVECTOR", "TFLOAT", "TTEXTURE", "TREALSTRING", "TVAR", "TOPERATOR", "TNUMBER", "TBOOLEAN", "'{'", "'}'", "'='", "$accept", "shader", "$@1", "$@2", "$@3", "PropertySection", "$@4", "$@5", "$@6", "$@7", "ParameterSection", "TechniqueSection", "$@8", "PassSection", "$@9", "$@10", "$@11", "PassType", "codeSection", "StateSection", "$@12", "$@13", "RenderStateSetup", "SamplerSetup", "DepthTestSetup", "BlendSetup", "AlphaTestSetup", "shadertype", "$@14", "$@15", "$@16", "DeviceTypeSetup", "$@17", "$@18", "$@19", "CodeBlock", 0 }; #endif # ifdef YYPRINT /* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to token YYLEX-NUM. */ static const yytype_uint16 yytoknum[] = { 0, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 123, 125, 61 }; # endif /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */ static const yytype_uint8 yyr1[] = { 0, 34, 36, 37, 38, 35, 39, 40, 41, 39, 39, 42, 39, 43, 39, 44, 44, 44, 44, 44, 44, 44, 44, 44, 45, 46, 45, 47, 47, 48, 49, 50, 47, 51, 51, 52, 52, 52, 53, 54, 55, 53, 56, 56, 56, 56, 56, 56, 56, 56, 56, 57, 57, 58, 58, 59, 59, 60, 60, 62, 63, 64, 61, 65, 66, 67, 68, 65, 69, 69, 69, 69, 69, 69 }; /* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. 
*/ static const yytype_uint8 yyr2[] = { 0, 2, 0, 0, 0, 8, 0, 0, 0, 7, 3, 0, 5, 0, 6, 0, 5, 6, 6, 5, 6, 5, 6, 5, 0, 0, 3, 0, 3, 0, 0, 0, 9, 0, 2, 0, 2, 2, 0, 0, 0, 7, 0, 3, 3, 3, 2, 3, 2, 2, 2, 3, 4, 2, 2, 4, 2, 3, 2, 0, 0, 0, 8, 0, 0, 0, 0, 9, 0, 3, 5, 5, 5, 5 }; /* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state STATE-NUM when YYTABLE doesn't specify something else to do. Zero means the default is an error. */ static const yytype_uint8 yydefact[] = { 0, 0, 0, 2, 1, 0, 3, 6, 4, 0, 0, 0, 0, 0, 11, 7, 10, 5, 13, 24, 15, 24, 12, 25, 8, 14, 33, 0, 0, 0, 0, 0, 0, 0, 29, 28, 26, 0, 0, 0, 0, 9, 34, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 0, 19, 0, 21, 0, 23, 0, 16, 35, 18, 20, 22, 17, 31, 0, 0, 36, 37, 59, 32, 0, 0, 39, 60, 42, 63, 40, 61, 0, 0, 0, 0, 0, 0, 0, 0, 0, 50, 46, 48, 49, 0, 0, 43, 44, 45, 53, 54, 47, 0, 56, 0, 58, 0, 41, 64, 62, 0, 57, 51, 0, 55, 52, 65, 68, 66, 0, 0, 0, 69, 0, 67, 0, 71, 72, 73, 70 }; /* YYDEFGOTO[NTERM-NUM]. */ static const yytype_int8 yydefgoto[] = { -1, 2, 5, 7, 12, 8, 20, 33, 19, 21, 24, 22, 28, 23, 43, 61, 68, 34, 66, 69, 77, 89, 79, 90, 91, 92, 93, 70, 74, 78, 95, 80, 113, 117, 121, 118 }; /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing STATE-NUM. */ #define YYPACT_NINF -17 static const yytype_int8 yypact[] = { 8, -3, 12, -17, -17, -2, -17, -17, 24, -16, -1, 17, 10, 21, -17, -17, -17, -17, -17, -5, -17, -5, -17, -4, 11, -17, -17, 25, 22, 26, 28, 29, 30, 27, 34, -17, -17, -15, -14, -8, -7, -17, -17, 31, -9, 35, 33, 37, 36, 38, 39, 41, -17, 42, -17, 44, -17, 45, -17, 47, -17, -17, -17, -17, -17, -17, 32, 48, 43, 46, -17, -17, -17, 49, 50, -17, -17, -17, -17, -11, 55, 51, 52, 53, 18, 54, 19, 20, 56, 57, -17, -17, -17, -17, 59, 58, -17, -17, -17, -17, -17, -17, 60, -17, 61, -17, 64, -17, -17, -17, 62, -17, 65, 63, -17, -17, -17, -17, 16, 67, 66, 68, -17, 69, -17, 15, -17, -17, -17, -17 }; /* YYPGOTO[NTERM-NUM]. */ static const yytype_int8 yypgoto[] = { -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, 76, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17 }; /* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If positive, shift that token. If negative, reduce the rule which number is the opposite. If zero, do what YYDEFACT says. If YYTABLE_NINF, syntax error. */ #define YYTABLE_NINF -39 static const yytype_int16 yytable[] = { -27, 26, 81, 82, 83, 84, 85, 86, 87, 88, 13, 1, 4, 44, 46, 14, -27, 27, 45, 47, 48, 50, 119, 3, 53, 49, 51, 120, 9, 6, 15, 10, 11, 29, 30, 31, 32, 126, 127, 128, 129, 67, 17, 16, -38, 99, 102, 104, 100, 103, 105, 35, 18, 37, 36, 38, 39, 40, 73, 41, 42, 54, 52, 56, 58, 94, 55, 60, 62, 57, 63, 64, 59, 65, 71, 72, 0, 96, 97, 98, 75, 76, 106, 0, 101, 108, 110, 111, 114, 107, 109, 112, 115, 122, 116, 123, 125, 25, 0, 0, 124 }; static const yytype_int8 yycheck[] = { 5, 5, 13, 14, 15, 16, 17, 18, 19, 20, 26, 3, 0, 28, 28, 31, 21, 21, 33, 33, 28, 28, 6, 26, 33, 33, 33, 11, 4, 31, 31, 7, 8, 22, 23, 24, 25, 22, 23, 24, 25, 9, 32, 26, 12, 27, 27, 27, 30, 30, 30, 26, 31, 27, 32, 27, 27, 27, 12, 32, 26, 26, 31, 26, 26, 10, 33, 26, 26, 33, 26, 26, 33, 26, 26, 32, -1, 26, 26, 26, 31, 31, 26, -1, 30, 26, 26, 26, 26, 32, 32, 27, 27, 26, 31, 29, 27, 21, -1, -1, 32 }; /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing symbol of state STATE-NUM. 
*/ static const yytype_uint8 yystos[] = { 0, 3, 35, 26, 0, 36, 31, 37, 39, 4, 7, 8, 38, 26, 31, 31, 26, 32, 31, 42, 40, 43, 45, 47, 44, 45, 5, 21, 46, 22, 23, 24, 25, 41, 51, 26, 32, 27, 27, 27, 27, 32, 26, 48, 28, 33, 28, 33, 28, 33, 28, 33, 31, 33, 26, 33, 26, 33, 26, 33, 26, 49, 26, 26, 26, 26, 52, 9, 50, 53, 61, 26, 32, 12, 62, 31, 31, 54, 63, 56, 65, 13, 14, 15, 16, 17, 18, 19, 20, 55, 57, 58, 59, 60, 10, 64, 26, 26, 26, 27, 30, 30, 27, 30, 27, 30, 26, 32, 26, 32, 26, 26, 27, 66, 26, 27, 31, 67, 69, 6, 11, 68, 26, 29, 32, 27, 22, 23, 24, 25 }; #define yyerrok (yyerrstatus = 0) #define yyclearin (yychar = YYEMPTY) #define YYEMPTY (-2) #define YYEOF 0 #define YYACCEPT goto yyacceptlab #define YYABORT goto yyabortlab #define YYERROR goto yyerrorlab /* Like YYERROR except do call yyerror. This remains here temporarily to ease the transition to the new meaning of YYERROR, for GCC. Once GCC version 2 has supplanted version 1, this can go. */ #define YYFAIL goto yyerrlab #define YYRECOVERING() (!!yyerrstatus) #define YYBACKUP(Token, Value) \ do \ if (yychar == YYEMPTY && yylen == 1) \ { \ yychar = (Token); \ yylval = (Value); \ yytoken = YYTRANSLATE (yychar); \ YYPOPSTACK (1); \ goto yybackup; \ } \ else \ { \ yyerror (YY_("syntax error: cannot back up")); \ YYERROR; \ } \ while (YYID (0)) #define YYTERROR 1 #define YYERRCODE 256 /* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N]. If N is 0, then set CURRENT to the empty location which ends the previous symbol: RHS[0] (always defined). */ #define YYRHSLOC(Rhs, K) ((Rhs)[K]) #ifndef YYLLOC_DEFAULT # define YYLLOC_DEFAULT(Current, Rhs, N) \ do \ if (YYID (N)) \ { \ (Current).first_line = YYRHSLOC (Rhs, 1).first_line; \ (Current).first_column = YYRHSLOC (Rhs, 1).first_column; \ (Current).last_line = YYRHSLOC (Rhs, N).last_line; \ (Current).last_column = YYRHSLOC (Rhs, N).last_column; \ } \ else \ { \ (Current).first_line = (Current).last_line = \ YYRHSLOC (Rhs, 0).last_line; \ (Current).first_column = (Current).last_column = \ YYRHSLOC (Rhs, 0).last_column; \ } \ while (YYID (0)) #endif /* YY_LOCATION_PRINT -- Print the location on the stream. This macro was not mandated originally: define only if we know we won't break user code: when these are the locations we know. */ #ifndef YY_LOCATION_PRINT # if YYLTYPE_IS_TRIVIAL # define YY_LOCATION_PRINT(File, Loc) \ fprintf (File, "%d.%d-%d.%d", \ (Loc).first_line, (Loc).first_column, \ (Loc).last_line, (Loc).last_column) # else # define YY_LOCATION_PRINT(File, Loc) ((void) 0) # endif #endif /* YYLEX -- calling `yylex' with the right arguments. */ #ifdef YYLEX_PARAM # define YYLEX yylex (YYLEX_PARAM) #else # define YYLEX yylex () #endif /* Enable debugging if requested. */ #if YYDEBUG # ifndef YYFPRINTF # include <stdio.h> /* INFRINGES ON USER NAME SPACE */ # define YYFPRINTF fprintf # endif # define YYDPRINTF(Args) \ do { \ if (yydebug) \ YYFPRINTF Args; \ } while (YYID (0)) # define YY_SYMBOL_PRINT(Title, Type, Value, Location) \ do { \ if (yydebug) \ { \ YYFPRINTF (stderr, "%s ", Title); \ yy_symbol_print (stderr, \ Type, Value); \ YYFPRINTF (stderr, "\n"); \ } \ } while (YYID (0)) /*--------------------------------. | Print this symbol on YYOUTPUT. 
| `--------------------------------*/ /*ARGSUSED*/ #if (defined __STDC__ || defined __C99__FUNC__ \ || defined __cplusplus || defined _MSC_VER) static void yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep) #else static void yy_symbol_value_print (yyoutput, yytype, yyvaluep) FILE *yyoutput; int yytype; YYSTYPE const * const yyvaluep; #endif { if (!yyvaluep) return; # ifdef YYPRINT if (yytype < YYNTOKENS) YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep); # else YYUSE (yyoutput); # endif switch (yytype) { default: break; } } /*--------------------------------. | Print this symbol on YYOUTPUT. | `--------------------------------*/ #if (defined __STDC__ || defined __C99__FUNC__ \ || defined __cplusplus || defined _MSC_VER) static void yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep) #else static void yy_symbol_print (yyoutput, yytype, yyvaluep) FILE *yyoutput; int yytype; YYSTYPE const * const yyvaluep; #endif { if (yytype < YYNTOKENS) YYFPRINTF (yyoutput, "token %s (", yytname[yytype]); else YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]); yy_symbol_value_print (yyoutput, yytype, yyvaluep); YYFPRINTF (yyoutput, ")"); } /*------------------------------------------------------------------. | yy_stack_print -- Print the state stack from its BOTTOM up to its | | TOP (included). | `------------------------------------------------------------------*/ #if (defined __STDC__ || defined __C99__FUNC__ \ || defined __cplusplus || defined _MSC_VER) static void yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop) #else static void yy_stack_print (yybottom, yytop) yytype_int16 *yybottom; yytype_int16 *yytop; #endif { YYFPRINTF (stderr, "Stack now"); for (; yybottom <= yytop; yybottom++) { int yybot = *yybottom; YYFPRINTF (stderr, " %d", yybot); } YYFPRINTF (stderr, "\n"); } # define YY_STACK_PRINT(Bottom, Top) \ do { \ if (yydebug) \ yy_stack_print ((Bottom), (Top)); \ } while (YYID (0)) /*------------------------------------------------. | Report that the YYRULE is going to be reduced. | `------------------------------------------------*/ #if (defined __STDC__ || defined __C99__FUNC__ \ || defined __cplusplus || defined _MSC_VER) static void yy_reduce_print (YYSTYPE *yyvsp, int yyrule) #else static void yy_reduce_print (yyvsp, yyrule) YYSTYPE *yyvsp; int yyrule; #endif { int yynrhs = yyr2[yyrule]; int yyi; unsigned long int yylno = yyrline[yyrule]; YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n", yyrule - 1, yylno); /* The symbols being reduced. */ for (yyi = 0; yyi < yynrhs; yyi++) { YYFPRINTF (stderr, " $%d = ", yyi + 1); yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi], &(yyvsp[(yyi + 1) - (yynrhs)]) ); YYFPRINTF (stderr, "\n"); } } # define YY_REDUCE_PRINT(Rule) \ do { \ if (yydebug) \ yy_reduce_print (yyvsp, Rule); \ } while (YYID (0)) /* Nonzero means print parse trace. It is left uninitialized so that multiple parsers can coexist. */ int yydebug; #else /* !YYDEBUG */ # define YYDPRINTF(Args) # define YY_SYMBOL_PRINT(Title, Type, Value, Location) # define YY_STACK_PRINT(Bottom, Top) # define YY_REDUCE_PRINT(Rule) #endif /* !YYDEBUG */ /* YYINITDEPTH -- initial size of the parser's stacks. */ #ifndef YYINITDEPTH # define YYINITDEPTH 200 #endif /* YYMAXDEPTH -- maximum size the stacks can grow to (effective only if the built-in stack extension method is used). 
Do not make this value too large; the results are undefined if YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH) evaluated with infinite-precision integer arithmetic. */ #ifndef YYMAXDEPTH # define YYMAXDEPTH 10000 #endif #if YYERROR_VERBOSE # ifndef yystrlen # if defined __GLIBC__ && defined _STRING_H # define yystrlen strlen # else /* Return the length of YYSTR. */ #if (defined __STDC__ || defined __C99__FUNC__ \ || defined __cplusplus || defined _MSC_VER) static YYSIZE_T yystrlen (const char *yystr) #else static YYSIZE_T yystrlen (yystr) const char *yystr; #endif { YYSIZE_T yylen; for (yylen = 0; yystr[yylen]; yylen++) continue; return yylen; } # endif # endif # ifndef yystpcpy # if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE # define yystpcpy stpcpy # else /* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in YYDEST. */ #if (defined __STDC__ || defined __C99__FUNC__ \ || defined __cplusplus || defined _MSC_VER) static char * yystpcpy (char *yydest, const char *yysrc) #else static char * yystpcpy (yydest, yysrc) char *yydest; const char *yysrc; #endif { char *yyd = yydest; const char *yys = yysrc; while ((*yyd++ = *yys++) != '\0') continue; return yyd - 1; } # endif # endif # ifndef yytnamerr /* Copy to YYRES the contents of YYSTR after stripping away unnecessary quotes and backslashes, so that it's suitable for yyerror. The heuristic is that double-quoting is unnecessary unless the string contains an apostrophe, a comma, or backslash (other than backslash-backslash). YYSTR is taken from yytname. If YYRES is null, do not copy; instead, return the length of what the result would have been. */ static YYSIZE_T yytnamerr (char *yyres, const char *yystr) { if (*yystr == '"') { YYSIZE_T yyn = 0; char const *yyp = yystr; for (;;) switch (*++yyp) { case '\'': case ',': goto do_not_strip_quotes; case '\\': if (*++yyp != '\\') goto do_not_strip_quotes; /* Fall through. */ default: if (yyres) yyres[yyn] = *yyp; yyn++; break; case '"': if (yyres) yyres[yyn] = '\0'; return yyn; } do_not_strip_quotes: ; } if (! yyres) return yystrlen (yystr); return yystpcpy (yyres, yystr) - yyres; } # endif /* Copy into YYRESULT an error message about the unexpected token YYCHAR while in state YYSTATE. Return the number of bytes copied, including the terminating null byte. If YYRESULT is null, do not copy anything; just return the number of bytes that would be copied. As a special case, return 0 if an ordinary "syntax error" message will do. Return YYSIZE_MAXIMUM if overflow occurs during size calculation. */ static YYSIZE_T yysyntax_error (char *yyresult, int yystate, int yychar) { int yyn = yypact[yystate]; if (! (YYPACT_NINF < yyn && yyn <= YYLAST)) return 0; else { int yytype = YYTRANSLATE (yychar); YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]); YYSIZE_T yysize = yysize0; YYSIZE_T yysize1; int yysize_overflow = 0; enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; int yyx; # if 0 /* This is so xgettext sees the translatable formats that are constructed on the fly. 
*/ YY_("syntax error, unexpected %s"); YY_("syntax error, unexpected %s, expecting %s"); YY_("syntax error, unexpected %s, expecting %s or %s"); YY_("syntax error, unexpected %s, expecting %s or %s or %s"); YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"); # endif char *yyfmt; char const *yyf; static char const yyunexpected[] = "syntax error, unexpected %s"; static char const yyexpecting[] = ", expecting %s"; static char const yyor[] = " or %s"; char yyformat[sizeof yyunexpected + sizeof yyexpecting - 1 + ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2) * (sizeof yyor - 1))]; char const *yyprefix = yyexpecting; /* Start YYX at -YYN if negative to avoid negative indexes in YYCHECK. */ int yyxbegin = yyn < 0 ? -yyn : 0; /* Stay within bounds of both yycheck and yytname. */ int yychecklim = YYLAST - yyn + 1; int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS; int yycount = 1; yyarg[0] = yytname[yytype]; yyfmt = yystpcpy (yyformat, yyunexpected); for (yyx = yyxbegin; yyx < yyxend; ++yyx) if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR) { if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM) { yycount = 1; yysize = yysize0; yyformat[sizeof yyunexpected - 1] = '\0'; break; } yyarg[yycount++] = yytname[yyx]; yysize1 = yysize + yytnamerr (0, yytname[yyx]); yysize_overflow |= (yysize1 < yysize); yysize = yysize1; yyfmt = yystpcpy (yyfmt, yyprefix); yyprefix = yyor; } yyf = YY_(yyformat); yysize1 = yysize + yystrlen (yyf); yysize_overflow |= (yysize1 < yysize); yysize = yysize1; if (yysize_overflow) return YYSIZE_MAXIMUM; if (yyresult) { /* Avoid sprintf, as that infringes on the user's name space. Don't have undefined behavior even if the translation produced a string with the wrong number of "%s"s. */ char *yyp = yyresult; int yyi = 0; while ((*yyp = *yyf) != '\0') { if (*yyp == '%' && yyf[1] == 's' && yyi < yycount) { yyp += yytnamerr (yyp, yyarg[yyi++]); yyf += 2; } else { yyp++; yyf++; } } } return yysize; } } #endif /* YYERROR_VERBOSE */ /*-----------------------------------------------. | Release the memory associated to this symbol. | `-----------------------------------------------*/ /*ARGSUSED*/ #if (defined __STDC__ || defined __C99__FUNC__ \ || defined __cplusplus || defined _MSC_VER) static void yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep) #else static void yydestruct (yymsg, yytype, yyvaluep) const char *yymsg; int yytype; YYSTYPE *yyvaluep; #endif { YYUSE (yyvaluep); if (!yymsg) yymsg = "Deleting"; YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp); switch (yytype) { default: break; } } /* Prevent warnings from -Wmissing-prototypes. */ #ifdef YYPARSE_PARAM #if defined __STDC__ || defined __cplusplus int yyparse (void *YYPARSE_PARAM); #else int yyparse (); #endif #else /* ! YYPARSE_PARAM */ #if defined __STDC__ || defined __cplusplus int yyparse (void); #else int yyparse (); #endif #endif /* ! YYPARSE_PARAM */ /* The lookahead symbol. */ int yychar; /* The semantic value of the lookahead symbol. */ YYSTYPE yylval; /* Number of syntax errors so far. */ int yynerrs; /*-------------------------. | yyparse or yypush_parse. | `-------------------------*/ #ifdef YYPARSE_PARAM #if (defined __STDC__ || defined __C99__FUNC__ \ || defined __cplusplus || defined _MSC_VER) int yyparse (void *YYPARSE_PARAM) #else int yyparse (YYPARSE_PARAM) void *YYPARSE_PARAM; #endif #else /* ! 
YYPARSE_PARAM */ #if (defined __STDC__ || defined __C99__FUNC__ \ || defined __cplusplus || defined _MSC_VER) int yyparse (void) #else int yyparse () #endif #endif { int yystate; /* Number of tokens to shift before error messages enabled. */ int yyerrstatus; /* The stacks and their tools: `yyss': related to states. `yyvs': related to semantic values. Refer to the stacks thru separate pointers, to allow yyoverflow to reallocate them elsewhere. */ /* The state stack. */ yytype_int16 yyssa[YYINITDEPTH]; yytype_int16 *yyss; yytype_int16 *yyssp; /* The semantic value stack. */ YYSTYPE yyvsa[YYINITDEPTH]; YYSTYPE *yyvs; YYSTYPE *yyvsp; YYSIZE_T yystacksize; int yyn; int yyresult; /* Lookahead token as an internal (translated) token number. */ int yytoken; /* The variables used to return semantic value and location from the action routines. */ YYSTYPE yyval; #if YYERROR_VERBOSE /* Buffer for error messages, and its allocated size. */ char yymsgbuf[128]; char *yymsg = yymsgbuf; YYSIZE_T yymsg_alloc = sizeof yymsgbuf; #endif #define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N)) /* The number of symbols on the RHS of the reduced rule. Keep to zero when no symbol should be popped. */ int yylen = 0; yytoken = 0; yyss = yyssa; yyvs = yyvsa; yystacksize = YYINITDEPTH; YYDPRINTF ((stderr, "Starting parse\n")); yystate = 0; yyerrstatus = 0; yynerrs = 0; yychar = YYEMPTY; /* Cause a token to be read. */ /* Initialize stack pointers. Waste one element of value and location stack so that they stay on the same level as the state stack. The wasted elements are never initialized. */ yyssp = yyss; yyvsp = yyvs; goto yysetstate; /*------------------------------------------------------------. | yynewstate -- Push a new state, which is found in yystate. | `------------------------------------------------------------*/ yynewstate: /* In all cases, when you get here, the value and location stacks have just been pushed. So pushing a state here evens the stacks. */ yyssp++; yysetstate: *yyssp = yystate; if (yyss + yystacksize - 1 <= yyssp) { /* Get the current used size of the three stacks, in elements. */ YYSIZE_T yysize = yyssp - yyss + 1; #ifdef yyoverflow { /* Give user a chance to reallocate the stack. Use copies of these so that the &'s don't force the real ones into memory. */ YYSTYPE *yyvs1 = yyvs; yytype_int16 *yyss1 = yyss; /* Each stack pointer address is followed by the size of the data in use in that stack, in bytes. This used to be a conditional around just the two extra args, but that might be undefined if yyoverflow is a macro. */ yyoverflow (YY_("memory exhausted"), &yyss1, yysize * sizeof (*yyssp), &yyvs1, yysize * sizeof (*yyvsp), &yystacksize); yyss = yyss1; yyvs = yyvs1; } #else /* no yyoverflow */ # ifndef YYSTACK_RELOCATE goto yyexhaustedlab; # else /* Extend the stack our own way. */ if (YYMAXDEPTH <= yystacksize) goto yyexhaustedlab; yystacksize *= 2; if (YYMAXDEPTH < yystacksize) yystacksize = YYMAXDEPTH; { yytype_int16 *yyss1 = yyss; union yyalloc *yyptr = (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize)); if (! 
yyptr) goto yyexhaustedlab; YYSTACK_RELOCATE (yyss_alloc, yyss); YYSTACK_RELOCATE (yyvs_alloc, yyvs); # undef YYSTACK_RELOCATE if (yyss1 != yyssa) YYSTACK_FREE (yyss1); } # endif #endif /* no yyoverflow */ yyssp = yyss + yysize - 1; yyvsp = yyvs + yysize - 1; YYDPRINTF ((stderr, "Stack size increased to %lu\n", (unsigned long int) yystacksize)); if (yyss + yystacksize - 1 <= yyssp) YYABORT; } YYDPRINTF ((stderr, "Entering state %d\n", yystate)); if (yystate == YYFINAL) YYACCEPT; goto yybackup; /*-----------. | yybackup. | `-----------*/ yybackup: /* Do appropriate processing given the current state. Read a lookahead token if we need one and don't already have one. */ /* First try to decide what to do without reference to lookahead token. */ yyn = yypact[yystate]; if (yyn == YYPACT_NINF) goto yydefault; /* Not known => get a lookahead token if don't already have one. */ /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */ if (yychar == YYEMPTY) { YYDPRINTF ((stderr, "Reading a token: ")); yychar = YYLEX; } if (yychar <= YYEOF) { yychar = yytoken = YYEOF; YYDPRINTF ((stderr, "Now at end of input.\n")); } else { yytoken = YYTRANSLATE (yychar); YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc); } /* If the proper action on seeing token YYTOKEN is to reduce or to detect an error, take that action. */ yyn += yytoken; if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken) goto yydefault; yyn = yytable[yyn]; if (yyn <= 0) { if (yyn == 0 || yyn == YYTABLE_NINF) goto yyerrlab; yyn = -yyn; goto yyreduce; } /* Count tokens shifted since error; after three, turn off error status. */ if (yyerrstatus) yyerrstatus--; /* Shift the lookahead token. */ YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc); /* Discard the shifted token. */ yychar = YYEMPTY; yystate = yyn; *++yyvsp = yylval; goto yynewstate; /*-----------------------------------------------------------. | yydefault -- do the default action for the current state. | `-----------------------------------------------------------*/ yydefault: yyn = yydefact[yystate]; if (yyn == 0) goto yyerrlab; goto yyreduce; /*-----------------------------. | yyreduce -- Do a reduction. | `-----------------------------*/ yyreduce: /* yyn is the number of a rule to reduce with. */ yylen = yyr2[yyn]; /* If YYLEN is nonzero, implement the default value of the action: `$$ = $1'. Otherwise, the following line sets YYVAL to garbage. This behavior is undocumented and Bison users should not rely upon it. Assigning to YYVAL unconditionally makes the parser a bit smaller, and it avoids a GCC warning that YYVAL may be used uninitialized. 
*/ yyval = yyvsp[1-yylen]; YY_REDUCE_PRINT (yyn); switch (yyn) { case 2: { //n_printf("init genesisshader\n"); g_GenesisMaterial->SetName((yyvsp[(2) - (2)].str)); delete[] (yyvsp[(2) - (2)].str); ResetParserParams(); g_curGenesisMakeMaterial = new GenesisMakeMaterial(); } break; case 3: { //n_printf("in genesisshader,left\n"); } break; case 4: { //n_printf("from PropertySection to genesisshader\n"); } break; case 5: { //n_printf("out genesisshader,right\n"); g_GenesisMaterial->AddMaterial(*g_curGenesisMakeMaterial); delete g_curGenesisMakeMaterial; g_curGenesisMakeMaterial = 0; } break; case 6: {//n_printf("init PropertySection\n"); } break; case 7: {//n_printf("in ParameterSection,left\n"); } break; case 8: {//n_printf("from ParameterSection to PropertySection\n"); } break; case 9: { //n_printf("out ParameterSection,right\n"); } break; case 10: { g_curGenesisMakeMaterial->SetRenderQueue(Graphic::RenderQueue::FromString((yyvsp[(3) - (3)].str))); //n_printf("in PropertySection,setrenderqueue:%s\n", Util::String($3).AsCharPtr()); } break; case 11: { //n_printf("in TechniqueSection,left\n"); g_curGenesisMakeTechnique = new GenesisMakeTechnique(); } break; case 12: {//n_printf("from TechniqueSection to PropertySection\n"); } break; case 13: { //n_printf("in TechniqueSection,left\n"); g_curGenesisMakeTechnique = new GenesisMakeTechnique(); g_curGenesisMakeTechnique->SetName((yyvsp[(3) - (4)].str)); } break; case 14: {//n_printf("from TechniqueSection to PropertySection\n"); } break; case 15: {//n_printf("init ParameterSection\n"); } break; case 16: { if((yyvsp[(2) - (5)].spt) == Graphic::eShaderParamTexture2D) { g_curMatParam = new Graphic::MaterialParamTex2D(); } else if((yyvsp[(2) - (5)].spt) == Graphic::eShaderParamTextureCUBE) { g_curMatParam = new Graphic::MaterialParamTexCube(); } else if((yyvsp[(2) - (5)].spt) == Graphic::eShaderParamTexture1D) { g_curMatParam = new Graphic::MaterialParamTex1D(); } else if((yyvsp[(2) - (5)].spt) == Graphic::eShaderParamTexture3D) { g_curMatParam = new Graphic::MaterialParamTex3D(); } else { n_error("GenesisShader Parser : Invalid Texture Type !"); } g_curMatParam->SetName((yyvsp[(3) - (5)].str)); g_curMatParam->SetDesc((yyvsp[(3) - (5)].str)); g_curMatParam->SetStringValue((yyvsp[(5) - (5)].str)); //n_printf("define texture\n"); g_curGenesisMakeMaterial->AddMatParam(g_curMatParam); g_curMatParam = NULL; if ((yyvsp[(2) - (5)].spt) == Graphic::eShaderParamTexture2D) { char texOffestScaleValue[] = "0.0,0.0,1.0,1.0"; Util::String texOffestScale; texOffestScale.Clear(); texOffestScale.Format("%s_UV_OffsetScale",(yyvsp[(3) - (5)].str)); g_curMatParam = new Graphic::MaterialParamVector(); g_curMatParam->SetName(texOffestScale); g_curMatParam->SetDesc(texOffestScale); g_curMatParam->SetStringValue(texOffestScaleValue); g_curGenesisMakeMaterial->AddMatParam(g_curMatParam); g_curMatParam = NULL; } } break; case 17: { if((yyvsp[(2) - (6)].spt) == Graphic::eShaderParamTexture2D) { g_curMatParam = new Graphic::MaterialParamTex2D(); } else if((yyvsp[(2) - (6)].spt) == Graphic::eShaderParamTextureCUBE) { g_curMatParam = new Graphic::MaterialParamTexCube(); } else if((yyvsp[(2) - (6)].spt) == Graphic::eShaderParamTexture1D) { g_curMatParam = new Graphic::MaterialParamTex1D(); } else if((yyvsp[(2) - (6)].spt) == Graphic::eShaderParamTexture3D) { g_curMatParam = new Graphic::MaterialParamTex3D(); } else { n_error("GenesisShader Parser : Invalid Texture Type !"); } g_curMatParam->SetName((yyvsp[(3) - (6)].str)); g_curMatParam->SetDesc((yyvsp[(4) - (6)].str)); 
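// Note: yyvsp[(k) - (n)] is Bison's expansion of $k for an n-symbol rule, so this action reads $3 as the parameter name, $4 as its editor-facing description and $6 as its default value string (roles inferred from the SetName/SetDesc/SetStringValue calls around it).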
g_curMatParam->SetStringValue((yyvsp[(6) - (6)].str)); //n_printf("define texture\n"); g_curGenesisMakeMaterial->AddMatParam(g_curMatParam); g_curMatParam = NULL; if ((yyvsp[(2) - (6)].spt) == Graphic::eShaderParamTexture2D) { char texOffestScaleValue[] = "0.0,0.0,1.0,1.0"; Util::String texOffestScale; texOffestScale.Clear(); texOffestScale.Format("%s_UV_OffsetScale",(yyvsp[(3) - (6)].str)); g_curMatParam = new Graphic::MaterialParamVector(); g_curMatParam->SetName(texOffestScale); g_curMatParam->SetDesc(texOffestScale); g_curMatParam->SetStringValue(texOffestScaleValue); g_curGenesisMakeMaterial->AddMatParam(g_curMatParam); g_curMatParam = NULL; } } break; case 18: <|fim▁hole|> { g_curMatParam = new Graphic::MaterialParamMatrix(); g_curMatParam->SetName((yyvsp[(3) - (6)].str)); g_curMatParam->SetDesc((yyvsp[(4) - (6)].str)); g_curMatParam->SetStringValue((yyvsp[(6) - (6)].str)); g_curGenesisMakeMaterial->AddMatParam(g_curMatParam); g_curMatParam = NULL; } break; case 19: { g_curMatParam = new Graphic::MaterialParamMatrix(); g_curMatParam->SetName((yyvsp[(3) - (5)].str)); g_curMatParam->SetDesc((yyvsp[(3) - (5)].str)); g_curMatParam->SetStringValue((yyvsp[(5) - (5)].str)); g_curGenesisMakeMaterial->AddMatParam(g_curMatParam); g_curMatParam = NULL; } break; case 20: { g_curMatParam = new Graphic::MaterialParamVector(); g_curMatParam->SetName((yyvsp[(3) - (6)].str)); g_curMatParam->SetDesc((yyvsp[(4) - (6)].str)); g_curMatParam->SetStringValue((yyvsp[(6) - (6)].str)); g_curGenesisMakeMaterial->AddMatParam(g_curMatParam); g_curMatParam = NULL; } break; case 21: { g_curMatParam = new Graphic::MaterialParamVector(); g_curMatParam->SetName((yyvsp[(3) - (5)].str)); g_curMatParam->SetDesc((yyvsp[(3) - (5)].str)); g_curMatParam->SetStringValue((yyvsp[(5) - (5)].str)); g_curGenesisMakeMaterial->AddMatParam(g_curMatParam); g_curMatParam = NULL; } break; case 22: { g_curMatParam = new Graphic::MaterialParamFloat(); g_curMatParam->SetName((yyvsp[(3) - (6)].str)); g_curMatParam->SetDesc((yyvsp[(4) - (6)].str)); g_curMatParam->SetStringValue((yyvsp[(6) - (6)].str)); g_curGenesisMakeMaterial->AddMatParam(g_curMatParam); g_curMatParam = NULL; } break; case 23: { g_curMatParam = new Graphic::MaterialParamFloat(); g_curMatParam->SetName((yyvsp[(3) - (5)].str)); g_curMatParam->SetDesc((yyvsp[(3) - (5)].str)); g_curMatParam->SetStringValue((yyvsp[(5) - (5)].str)); g_curGenesisMakeMaterial->AddMatParam(g_curMatParam); g_curMatParam = NULL; } break; case 24: { //n_printf("init TechniqueSection\n"); } break; case 25: { //n_printf("from PassSection to TechniqueSection\n"); } break; case 26: { //n_printf("out TechniqueSection,right\n"); g_curGenesisMakeMaterial->AddTechnique(*g_curGenesisMakeTechnique); delete g_curGenesisMakeTechnique; g_curGenesisMakeTechnique = 0; } break; case 27: { //n_printf("init PassSection\n"); } break; case 28: { printf("set MatType\n"); g_curGenesisMakeTechnique->SetIsMatTemplate((yyvsp[(3) - (3)].str)); } break; case 29: { //n_printf("init Pass\n"); } break; case 30: { //n_printf("in PassSection,left\n"); } break; case 31: { //n_printf("from codeSection to PassSection\n"); } break; case 32: { //n_printf("out PassSection,right\n"); g_curGenesisMakeTechnique->AddPass(*g_curMakePass); delete g_curMakePass; g_curMakePass = 0; } break; case 33: { n_printf("in PassSection,left\n"); g_curMakePass = new GenesisMakePass(); g_curMakePass->SetName("NoName"); } break; case 34: { n_printf("in PassSection,left\n"); g_curMakePass = new GenesisMakePass(); g_curMakePass->SetName((yyvsp[(2) - (2)].str)); 
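// $2 (yyvsp[(2) - (2)]) is the name token of this two-symbol pass rule; the sibling action just above (case 33) covers an unnamed pass and falls back to the literal "NoName".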
} break;
case 35: { //n_printf("in codeSection\n");
} break;
case 36: { //n_printf("from shadertype,to StateSection\n");
} break;
case 37: { //n_printf("from shadertype,to shadertype\n");
} break;
case 38: { //n_printf("in StateSection\n");
} break;
case 39: { g_rsDesc = RenderBase::RenderStateDesc::Create(); g_rsDesc->Setup(); //n_printf("Create StateSection\n");//n_printf("init StateSection\n");
} break;
case 40: { } break;
case 41: { g_curMakePass->SetRenderStateDesc(g_rsDesc); g_rsDesc = 0; //n_printf("from RenderStateSetup,to shadertype\n");
} break;
case 42: { //n_printf("in RenderStateSetup\n");
} break;
case 43: { RenderBase::DeviceRasterizerState rrs = g_rsDesc->GetRasterizerState(); rrs.m_cullMode = RenderBase::CullModeConverter::FromString((yyvsp[(3) - (3)].str)); g_rsDesc->SetRasterizerState(rrs); } break;
case 44: { RenderBase::DeviceRasterizerState rrs = g_rsDesc->GetRasterizerState(); rrs.m_fillMode = RenderBase::FillModeConverter::FromString((yyvsp[(3) - (3)].str)); g_rsDesc->SetRasterizerState(rrs); } break;
case 45: { RenderBase::DeviceBlendState rbs = g_rsDesc->GetBlendState(); rbs.m_colorWriteMask[0] = RenderBase::ColorMaskConverter::FromString((yyvsp[(3) - (3)].str)); g_rsDesc->SetBlendState(rbs); } break;
case 46: { //n_printf("set depthtest complete \n");
} break;
case 47: { RenderBase::DeviceDepthAndStencilState rdss = g_rsDesc->GetDepthAndStencilState(); rdss.m_depthWriteMask = (yyvsp[(3) - (3)].boolean); g_rsDesc->SetDepthAndStencilState(rdss); } break;
case 48: { //n_printf("set blendmode complete \n");
} break;
case 49: { //n_printf("set alphatest complete \n");
} break;
case 50: { //n_printf("set samplerstate complete \n");
} break;
case 51: { g_curGenesisMakeMaterial->AddTextureSampler((yyvsp[(2) - (3)].str),RenderBase::TextureAddressModeConverter::FromString((yyvsp[(3) - (3)].str))); } break;
case 52: { g_curGenesisMakeMaterial->AddTextureSampler((yyvsp[(2) - (4)].str),RenderBase::TextureAddressModeConverter::FromString((yyvsp[(3) - (4)].str)),RenderBase::TextureFilterOperationConverter::FromString((yyvsp[(4) - (4)].str))); } break;
case 53: { RenderBase::DeviceDepthAndStencilState rdss = g_rsDesc->GetDepthAndStencilState(); rdss.m_depthEnable = true; rdss.m_zFunc = RenderBase::CompareFunctionConverter::FromString((yyvsp[(2) - (2)].str)); g_rsDesc->SetDepthAndStencilState(rdss); } break;
case 54: { RenderBase::DeviceDepthAndStencilState rdss = g_rsDesc->GetDepthAndStencilState(); rdss.m_depthEnable = (yyvsp[(2) - (2)].boolean); g_rsDesc->SetDepthAndStencilState(rdss); } break;
case 55: { RenderBase::DeviceBlendState rbs = g_rsDesc->GetBlendState(); rbs.m_alphaBlendEnable[0] = true; rbs.m_blendOP[0] = RenderBase::BlendOperationConverter::FromString((yyvsp[(2) - (4)].str)); rbs.m_srcBlend[0] = RenderBase::AlphaBlendFactorConverter::FromString((yyvsp[(3) - (4)].str)); rbs.m_destBlend[0] = RenderBase::AlphaBlendFactorConverter::FromString((yyvsp[(4) - (4)].str)); g_rsDesc->SetBlendState(rbs); } break;
case 56: { RenderBase::DeviceBlendState rbs = g_rsDesc->GetBlendState(); rbs.m_alphaBlendEnable[0] = (yyvsp[(2) - (2)].boolean); g_rsDesc->SetBlendState(rbs); } break;
case 57: { RenderBase::DeviceBlendState rbs = g_rsDesc->GetBlendState(); rbs.m_alphaTestEnable = true; rbs.m_alphaFunc = RenderBase::CompareFunctionConverter::FromString((yyvsp[(2) - (3)].str)); const Util::String& valueStr = g_curGenesisMakeMaterial->GetMatParamValueByName((yyvsp[(3) - (3)].str)); if(!valueStr.IsValidFloat() || valueStr.IsEmpty()) { n_error("Invalid alpha_to_coverage_ref value! Please check your parameter type (float) and name!"); } else { rbs.m_alphaRef = valueStr.AsFloat(); } g_rsDesc->SetBlendState(rbs); } break;
case 58: { RenderBase::DeviceBlendState rbs = g_rsDesc->GetBlendState(); rbs.m_alphaTestEnable = (yyvsp[(2) - (2)].boolean); g_rsDesc->SetBlendState(rbs); } break;
case 59: { g_curGenesisMakeGPUProgram = new GenesisMakeGPUProgram(); g_curGenesisMakeGPUProgram->SetShaderType((yyvsp[(2) - (2)].str)); //n_printf("in shaderType,SetShaderType\n");
delete[] (yyvsp[(2) - (2)].str); } break;
case 60: { //n_printf("in shaderType,left\n");
} break;
case 61: { //n_printf("from DeviceTypeSetup to shaderType\n");
} break;
case 62: { if(g_curGenesisMakeGPUProgram != NULL) { delete g_curGenesisMakeGPUProgram; g_curGenesisMakeGPUProgram = NULL; } //n_printf("out shaderType,right\n");
} break;
case 63: { n_printf("in DeviceTypeSetup\n");} break;
case 64: { g_curGenesisMakeGPUProgram->SetDeviceType((yyvsp[(3) - (3)].str)); //n_printf("in DeviceTypeSetup\n");
delete[] (yyvsp[(3) - (3)].str); } break;
case 65: { //n_printf("in DeviceTypeSetup,left\n");
} break;
case 66: { //n_printf("from CodeBlock to DeviceTypeSetup\n");
} break;
case 67: { //n_printf("out DeviceTypeSetup,right\n");
g_curMakePass->AddShaderProgram(*g_curGenesisMakeGPUProgram); } break;
case 68: { //n_printf("in CodeBlock\n");
} break;
case 69: { g_curGenesisMakeGPUProgram->SetShaderCode((yyvsp[(3) - (3)].str)); //n_printf("in CodeBlock,AddGPUProgram\n");
delete[] (yyvsp[(3) - (3)].str); } break;
case 70: { g_curShaderParameter = new Graphic::ShaderParam(); g_curShaderParameter->SetParamType((yyvsp[(5) - (5)].spt)); g_curShaderParameter->SetRegister((yyvsp[(3) - (5)].num)); g_curShaderParameter->SetName((yyvsp[(4) - (5)].str)); g_curGenesisMakeGPUProgram->AddParam(*g_curShaderParameter); //n_printf("bind texture\n");
delete[] (yyvsp[(4) - (5)].str); delete g_curShaderParameter; g_curShaderParameter = 0; } break;
case 71: { g_curShaderParameter = new Graphic::ShaderParam(); g_curShaderParameter->SetParamType((yyvsp[(5) - (5)].spt)); g_curShaderParameter->SetRegister((yyvsp[(3) - (5)].num)); g_curShaderParameter->SetName((yyvsp[(4) - (5)].str)); g_curGenesisMakeGPUProgram->AddParam(*g_curShaderParameter); //n_printf("setparam matrix register\n");
delete[] (yyvsp[(4) - (5)].str); delete g_curShaderParameter; g_curShaderParameter = 0; } break;
case 72: { g_curShaderParameter = new Graphic::ShaderParam(); g_curShaderParameter->SetParamType((yyvsp[(5) - (5)].spt)); g_curShaderParameter->SetRegister((yyvsp[(3) - (5)].num)); g_curShaderParameter->SetName((yyvsp[(4) - (5)].str)); g_curGenesisMakeGPUProgram->AddParam(*g_curShaderParameter); //n_printf("setparam vector register\n");
delete[] (yyvsp[(4) - (5)].str); delete g_curShaderParameter; g_curShaderParameter = 0; } break;
case 73: { g_curShaderParameter = new Graphic::ShaderParam(); g_curShaderParameter->SetParamType((yyvsp[(5) - (5)].spt)); g_curShaderParameter->SetRegister((yyvsp[(3) - (5)].num)); g_curShaderParameter->SetName((yyvsp[(4) - (5)].str)); g_curGenesisMakeGPUProgram->AddParam(*g_curShaderParameter); //n_printf("setparam float register\n");
delete[] (yyvsp[(4) - (5)].str); delete g_curShaderParameter; g_curShaderParameter = 0; } break;
default: break; } YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc); YYPOPSTACK (yylen); yylen = 0; YY_STACK_PRINT (yyss, yyssp); *++yyvsp = yyval; /* Now `shift' the result of the reduction. Determine what state that goes to, based on the state we popped back to and the rule number reduced by.
*/ yyn = yyr1[yyn]; yystate = yypgoto[yyn - YYNTOKENS] + *yyssp; if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp) yystate = yytable[yystate]; else yystate = yydefgoto[yyn - YYNTOKENS]; goto yynewstate; /*------------------------------------. | yyerrlab -- here on detecting error | `------------------------------------*/ yyerrlab: /* If not already recovering from an error, report this error. */ if (!yyerrstatus) { ++yynerrs; #if ! YYERROR_VERBOSE yyerror (YY_("syntax error")); #else { YYSIZE_T yysize = yysyntax_error (0, yystate, yychar); if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM) { YYSIZE_T yyalloc = 2 * yysize; if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM)) yyalloc = YYSTACK_ALLOC_MAXIMUM; if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); yymsg = (char *) YYSTACK_ALLOC (yyalloc); if (yymsg) yymsg_alloc = yyalloc; else { yymsg = yymsgbuf; yymsg_alloc = sizeof yymsgbuf; } } if (0 < yysize && yysize <= yymsg_alloc) { (void) yysyntax_error (yymsg, yystate, yychar); yyerror (yymsg); } else { yyerror (YY_("syntax error")); if (yysize != 0) goto yyexhaustedlab; } } #endif } if (yyerrstatus == 3) { /* If just tried and failed to reuse lookahead token after an error, discard it. */ if (yychar <= YYEOF) { /* Return failure if at end of input. */ if (yychar == YYEOF) YYABORT; } else { yydestruct ("Error: discarding", yytoken, &yylval); yychar = YYEMPTY; } } /* Else will try to reuse lookahead token after shifting the error token. */ goto yyerrlab1; /*---------------------------------------------------. | yyerrorlab -- error raised explicitly by YYERROR. | `---------------------------------------------------*/ yyerrorlab: /* Pacify compilers like GCC when the user code never invokes YYERROR and the label yyerrorlab therefore never appears in user code. */ if (/*CONSTCOND*/ 0) goto yyerrorlab; /* Do not reclaim the symbols of the rule which action triggered this YYERROR. */ YYPOPSTACK (yylen); yylen = 0; YY_STACK_PRINT (yyss, yyssp); yystate = *yyssp; goto yyerrlab1; /*-------------------------------------------------------------. | yyerrlab1 -- common code for both syntax error and YYERROR. | `-------------------------------------------------------------*/ yyerrlab1: yyerrstatus = 3; /* Each real token shifted decrements this. */ for (;;) { yyn = yypact[yystate]; if (yyn != YYPACT_NINF) { yyn += YYTERROR; if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) { yyn = yytable[yyn]; if (0 < yyn) break; } } /* Pop the current state because it cannot handle the error token. */ if (yyssp == yyss) YYABORT; yydestruct ("Error: popping", yystos[yystate], yyvsp); YYPOPSTACK (1); yystate = *yyssp; YY_STACK_PRINT (yyss, yyssp); } *++yyvsp = yylval; /* Shift the error token. */ YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp); yystate = yyn; goto yynewstate; /*-------------------------------------. | yyacceptlab -- YYACCEPT comes here. | `-------------------------------------*/ yyacceptlab: yyresult = 0; goto yyreturn; /*-----------------------------------. | yyabortlab -- YYABORT comes here. | `-----------------------------------*/ yyabortlab: yyresult = 1; goto yyreturn; #if !defined(yyoverflow) || YYERROR_VERBOSE /*-------------------------------------------------. | yyexhaustedlab -- memory exhaustion comes here. | `-------------------------------------------------*/ yyexhaustedlab: yyerror (YY_("memory exhausted")); yyresult = 2; /* Fall through. 
*/ #endif yyreturn: if (yychar != YYEMPTY) yydestruct ("Cleanup: discarding lookahead", yytoken, &yylval); /* Do not reclaim the symbols of the rule which action triggered this YYABORT or YYACCEPT. */ YYPOPSTACK (yylen); YY_STACK_PRINT (yyss, yyssp); while (yyssp != yyss) { yydestruct ("Cleanup: popping", yystos[*yyssp], yyvsp); YYPOPSTACK (1); } #ifndef yyoverflow if (yyss != yyssa) YYSTACK_FREE (yyss); #endif #if YYERROR_VERBOSE if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); #endif /* Make sure YYID is used. */ return YYID (yyresult); } int yyerror (const char *s) { n_printf("GenesisShader Error: %s At line:%d\n",s,Genesislineno); return 0; } void ResetParserParams() { if(g_curGenesisMakeGPUProgram != NULL) { delete g_curGenesisMakeGPUProgram; g_curGenesisMakeGPUProgram = NULL; } if(g_curShaderParameter != NULL) { delete g_curShaderParameter; g_curShaderParameter = NULL; } if(g_curMatParam != NULL) { delete g_curMatParam; g_curMatParam = NULL; } if(g_rsDesc.isvalid()) { g_rsDesc = 0; } if(g_curMakePass != NULL) { delete g_curMakePass; g_curMakePass = NULL; } if(g_curGenesisMakeTechnique != NULL) { delete g_curGenesisMakeTechnique; g_curGenesisMakeTechnique = NULL; } if(g_curGenesisMakeMaterial != NULL) { delete g_curGenesisMakeMaterial; g_curGenesisMakeMaterial = NULL; } }<|fim▁end|>
<|file_name|>add-csv-modal.js<|end_file_name|><|fim▁begin|>/** * This file is part of agora-gui-admin. * Copyright (C) 2015-2016 Agora Voting SL <[email protected]> * agora-gui-admin is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License. * agora-gui-admin is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * You should have received a copy of the GNU Affero General Public License * along with agora-gui-admin. If not, see <http://www.gnu.org/licenses/>. **/ angular.module('avAdmin') .controller( 'AddCsvModal', function($scope, $modalInstance, election, ConfigService, Plugins) {<|fim▁hole|> $scope.election = election; $scope.textarea = ""; $scope.helpurl = ConfigService.helpUrl; $scope.ok = function () { $modalInstance.close($("#csv-textarea").val()); }; // if there's a parent election, add those fields at the end of the example if ($scope.election.children_election_info) { $scope.childrenElections = _.map( $scope.election.children_election_info.natural_order, function (election_id) { return $scope.election.childrenElectionNames[election_id]; } ); } else { $scope.childrenElections = []; } var exhtml = {html: [], scope: {}}; Plugins.hook( 'census-add-csv-modal', { exhtml: exhtml } ); $scope.exhtml = exhtml.html; $scope = _.extend($scope, exhtml.scope); $scope.cancel = function () { $modalInstance.dismiss('cancel'); }; } );<|fim▁end|>
<|file_name|>tests_defined.js<|end_file_name|><|fim▁begin|>Tinytest.add('peerdb - defined', function (test) { var isDefined = false; try { Document; isDefined = true; }<|fim▁hole|> } test.isTrue(isDefined, "Document is not defined"); test.isTrue(Package['peerlibrary:peerdb'].Document, "Package.peerlibrary:peerdb.Document is not defined"); });<|fim▁end|>
catch (e) {
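The tests_defined.js record above probes whether a global symbol exists by referencing it inside a try/catch. The same feature-detection idiom, sketched in Python purely for illustration (the probe target `Document` is taken from the record; `eval` is applied only to trusted literals here):

```python
def is_defined(name):
    # Referencing an unknown name raises NameError, the analogue of the
    # ReferenceError the JS test catches.
    try:
        eval(name)
        return True
    except NameError:
        return False

assert is_defined("len")           # a builtin resolves
assert not is_defined("Document")  # undefined, as in the test before peerdb loads
```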
<|file_name|>day14.rs<|end_file_name|><|fim▁begin|>#[cfg(target_family = "unix")] extern crate cursive; #[cfg(target_family = "unix")] use cursive::Cursive; #[cfg(target_family = "unix")] use cursive::traits::*; #[cfg(target_family = "unix")] use cursive::views::{Dialog, DummyView, LinearLayout, SelectView, TextView}; #[cfg(target_family = "unix")] use std::fs::{self, DirEntry, File}; #[cfg(target_family = "unix")] use std::io::Read; #[cfg(target_family = "unix")] use std::path::Path; #[cfg(target_family = "windows")] fn main() { println!("TODO"); } #[cfg(target_family = "unix")] fn file_picker<D>(directory: D) -> SelectView<DirEntry> where D: AsRef<Path>, { let mut view = SelectView::new(); for entry in fs::read_dir(directory).expect("can't read directory") { if let Ok(e) = entry { let file_name = e.file_name().into_string().unwrap();<|fim▁hole|> } view.on_select(update_status).on_submit(load_contents) } #[cfg(target_family = "unix")] fn update_status(app: &mut Cursive, entry: &DirEntry) { let mut status_bar = app.find_id::<TextView>("status").unwrap(); let file_name = entry.file_name().into_string().unwrap(); let file_size = entry.metadata().unwrap().len(); let content = format!("{}: {} bytes", file_name, file_size); status_bar.set_content(content); } #[cfg(target_family = "unix")] fn load_contents(app: &mut Cursive, entry: &DirEntry) { let mut text_view = app.find_id::<TextView>("contents").unwrap(); let content = if entry.metadata().unwrap().is_dir() { "<DIR>".to_string() } else { let mut buf = String::new(); let _ = File::open(entry.file_name()) .and_then(|mut f| f.read_to_string(&mut buf)) .map_err(|e| buf = format!("Error: {}", e)); buf }; text_view.set_content(content); } #[cfg(target_family = "unix")] fn main() { println!("24 Days of Rust vol. 2 - cursive"); let mut app = Cursive::new(); let mut panes = LinearLayout::horizontal(); let picker = file_picker("."); panes.add_child(picker.fixed_size((30, 25))); panes.add_child(DummyView); panes.add_child( TextView::new("file contents") .with_id("contents") .fixed_size((50, 25)), ); let mut layout = LinearLayout::vertical(); layout.add_child(panes); layout.add_child( TextView::new("status") .scrollable(false) .with_id("status") .fixed_size((80, 1)), ); app.add_layer(Dialog::around(layout).button("Quit", |a| a.quit())); app.run(); }<|fim▁end|>
view.add_item(file_name, e); }
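The day14.rs record builds a cursive file picker: it walks a directory and keeps each entry's name plus metadata for the status bar and preview pane. A minimal Python sketch of that directory-listing step (`list_entries` is a hypothetical helper, not part of the original crate):

```python
import os

def list_entries(directory="."):
    # Collect (name, size_in_bytes, is_dir) per entry -- the same data the
    # picker's update_status/load_contents callbacks consume.
    entries = []
    with os.scandir(directory) as it:
        for entry in it:
            info = entry.stat(follow_symlinks=False)
            entries.append((entry.name, info.st_size, entry.is_dir()))
    return entries

for name, size, is_dir in list_entries("."):
    print(f"{name}: {'<DIR>' if is_dir else f'{size} bytes'}")
```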
<|file_name|>ItemTweakConfig.java<|end_file_name|><|fim▁begin|>package com.oitsjustjose.vtweaks.common.config; import net.minecraftforge.common.ForgeConfigSpec; public class ItemTweakConfig { private static final String CATEGORY_ITEM_TWEAKS = "item tweaks"; public static ForgeConfigSpec.BooleanValue ENABLE_EGG_HATCHING; public static ForgeConfigSpec.IntValue EGG_HATCING_CHANCE; public static ForgeConfigSpec.BooleanValue ENABLE_SAPLING_SELF_PLANTING; public static ForgeConfigSpec.BooleanValue ENABLE_DESPAWN_TIME_OVERRIDE; public static ForgeConfigSpec.IntValue DESPAWN_TIME_OVERRIDE;<|fim▁hole|> ENABLE_EGG_HATCHING = COMMON_BUILDER.comment("Allows egg items in the world to hatch instead of despawn") .define("eggHatchingEnabled", true); EGG_HATCING_CHANCE = COMMON_BUILDER .comment("The chance (out of 100 - higher means more frequent) that the egg will turn into a chick\n" + "**DO NOT SET THIS TOO HIGH OR ELSE CHICKENS MAY INFINITELY LAG YOUR WORLD**") .defineInRange("eggHatchingChance", 1, 1, 100); ENABLE_SAPLING_SELF_PLANTING = COMMON_BUILDER .comment("Instead of de-spawning, saplings will attempt to plant themselves") .define("enableSaplingPlanting", true); ENABLE_DESPAWN_TIME_OVERRIDE = COMMON_BUILDER.comment("Allow for modifications to item despawn timers") .define("enableDespawnTimeAdjustments", false); DESPAWN_TIME_OVERRIDE = COMMON_BUILDER.comment( "Adjust Item Despawn Time (in ticks: 20 ticks in a second - default despawn delay is 6000 ticks)\n" + "-1 prevents items from despawning at all.\n" + "If other \"do x on despawn\" configs are enabled, then those items **will still despawn**") .defineInRange("despawnTimeAdjustments", 6000, -1, Integer.MAX_VALUE); ENABLE_CONCRETE_TWEAKS = COMMON_BUILDER .comment("Convert Concrete Powder to Concrete when the item is thrown into water") .define("enableConreteTweaks", true); COMMON_BUILDER.pop(); } }<|fim▁end|>
public static ForgeConfigSpec.BooleanValue ENABLE_CONCRETE_TWEAKS; public static void init(ForgeConfigSpec.Builder COMMON_BUILDER) { COMMON_BUILDER.comment("Item Tweaks").push(CATEGORY_ITEM_TWEAKS);
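The ItemTweakConfig record registers each option with a comment, a default, and, for the hatch chance, an allowed range (defineInRange). A framework-free Python sketch of that comment/default/range pattern (names are illustrative, not the Forge API):

```python
class RangedOption:
    def __init__(self, comment, default, lo=None, hi=None):
        self.comment, self.default = comment, default
        self.lo, self.hi = lo, hi
        self.value = default

    def set(self, value):
        # Reject out-of-range values, comparable to defineInRange().
        if self.lo is not None and self.hi is not None and not (self.lo <= value <= self.hi):
            raise ValueError(f"{value} outside [{self.lo}, {self.hi}]")
        self.value = value

egg_hatching_chance = RangedOption(
    "Chance (out of 100) that an egg item hatches", default=1, lo=1, hi=100)
egg_hatching_chance.set(5)
```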
<|file_name|>lexer_rules.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- tokens = [ 'LPAREN', 'RPAREN', 'LBRACE', 'RBRACE', 'EQUAL', 'DOUBLE_EQUAL', 'NUMBER', 'COMMA', 'VAR_DEFINITION', 'IF', 'ELSE', 'END', 'ID', 'PRINT' ] t_LPAREN = r"\(" t_RPAREN = r"\)" t_LBRACE = r"\{" t_RBRACE = r"\}" t_EQUAL = r"\=" t_DOUBLE_EQUAL = r"\=\=" def t_NUMBER(token): r"[0-9]+" token.value = int(token.value) return token t_COMMA = r"," def t_VAR_DEFINITION(token): r",\sFirst\sof\s(his|her)\sName" return token def t_IF(token): r"I\spromise" return token def t_ELSE(token): r"Mayhaps" return token def t_PRINT(token): r"Hodor" return token def t_END(token): r"And\snow\shis\swatch\sis\sended" return token def t_ID(token): r"[a-zA-Z][_a-zA-Z0-9]*" return token<|fim▁hole|> t_ignore = " \t" def t_NEWLINE(token): r"\n+" token.lexer.lineno += len(token.value) def t_IGNORE_COMMENTS(token): r"//(.*)\n" token.lexer.lineno += 1 def t_error(token): raise Exception("Sintax error: Unknown token on line {0}. \"{1}\"".format(token.lineno, token.value.partition("\n")[0]))<|fim▁end|>
<|file_name|>calibrate.cpp<|end_file_name|><|fim▁begin|>/* * Copyright 2010, Intel Corporation * * This file is part of PowerTOP * * This program file is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program in a file named COPYING; if not, write to the * Free Software Foundation, Inc,<|fim▁hole|> * Boston, MA 02110-1301 USA * or just google for it. * * Authors: * Arjan van de Ven <[email protected]> */ #include <iostream> #include <fstream> #include <algorithm> #include "calibrate.h" #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <pthread.h> #include <math.h> #include <sys/types.h> #include <dirent.h> #include "../parameters/parameters.h" extern "C" { #include "../tuning/iw.h" } #include <map> #include <vector> #include <string> using namespace std; static vector<string> usb_devices; static vector<string> rfkill_devices; static vector<string> backlight_devices; static vector<string> scsi_link_devices; static int blmax; static map<string, string> saved_sysfs; static volatile int stop_measurement; static int wireless_PS; static void save_sysfs(const char *filename) { char line[4096]; ifstream file; file.open(filename, ios::in); if (!file) return; file.getline(line, 4096); file.close(); saved_sysfs[filename] = line; } static void restore_all_sysfs(void) { map<string, string>::iterator it; for (it = saved_sysfs.begin(); it != saved_sysfs.end(); it++) write_sysfs(it->first, it->second); set_wifi_power_saving("wlan0", wireless_PS); } static void find_all_usb(void) { struct dirent *entry; DIR *dir; char filename[4096]; dir = opendir("/sys/bus/usb/devices/"); if (!dir) return; while (1) { ifstream file; entry = readdir(dir); if (!entry) break; if (entry->d_name[0] == '.') continue; sprintf(filename, "/sys/bus/usb/devices/%s/power/active_duration", entry->d_name); if (access(filename, R_OK)!=0) continue; sprintf(filename, "/sys/bus/usb/devices/%s/power/idVendor", entry->d_name); file.open(filename, ios::in); if (file) { file.getline(filename, 4096); file.close(); if (strcmp(filename, "1d6b")==0) continue; } sprintf(filename, "/sys/bus/usb/devices/%s/power/control", entry->d_name); save_sysfs(filename); usb_devices.push_back(filename); } closedir(dir); } static void suspend_all_usb_devices(void) { unsigned int i; for (i = 0; i < usb_devices.size(); i++) write_sysfs(usb_devices[i], "auto\n"); } static void find_all_rfkill(void) { struct dirent *entry; DIR *dir; char filename[4096]; dir = opendir("/sys/class/rfkill/"); if (!dir) return; while (1) { ifstream file; entry = readdir(dir); if (!entry) break; if (entry->d_name[0] == '.') continue; sprintf(filename, "/sys/class/rfkill/%s/soft", entry->d_name); if (access(filename, R_OK)!=0) continue; save_sysfs(filename); rfkill_devices.push_back(filename); } closedir(dir); } static void rfkill_all_radios(void) { unsigned int i; for (i = 0; i < rfkill_devices.size(); i++) write_sysfs(rfkill_devices[i], "1\n"); } static void unrfkill_all_radios(void) { unsigned int i; for (i = 0; i < rfkill_devices.size(); i++) write_sysfs(rfkill_devices[i], "0\n"); } static void 
find_backlight(void) { struct dirent *entry; DIR *dir; char filename[4096]; dir = opendir("/sys/class/backlight/"); if (!dir) return; while (1) { ifstream file; entry = readdir(dir); if (!entry) break; if (entry->d_name[0] == '.') continue; sprintf(filename, "/sys/class/backlight/%s/brightness", entry->d_name); if (access(filename, R_OK)!=0) continue; save_sysfs(filename); backlight_devices.push_back(filename); sprintf(filename, "/sys/class/backlight/%s/max_brightness", entry->d_name); blmax = read_sysfs(filename); } closedir(dir); } static void lower_backlight(void) { unsigned int i; for (i = 0; i < backlight_devices.size(); i++) write_sysfs(backlight_devices[i], "0\n"); } static void find_scsi_link(void) { struct dirent *entry; DIR *dir; char filename[4096]; dir = opendir("/sys/class/scsi_host/"); if (!dir) return; while (1) { ifstream file; entry = readdir(dir); if (!entry) break; if (entry->d_name[0] == '.') continue; sprintf(filename, "/sys/class/scsi_host/%s/link_power_management_policy", entry->d_name); if (access(filename, R_OK)!=0) continue; save_sysfs(filename); scsi_link_devices.push_back(filename); } closedir(dir); } static void set_scsi_link(const char *state) { unsigned int i; for (i = 0; i < scsi_link_devices.size(); i++) write_sysfs(scsi_link_devices[i], state); } static void *burn_cpu(void *dummy) { volatile double d = 1.1; while (!stop_measurement) { d = pow(d, 1.0001); } return NULL; } static void *burn_cpu_wakeups(void *dummy) { struct timespec tm; while (!stop_measurement) { tm.tv_sec = 0; tm.tv_nsec = (unsigned long)dummy; nanosleep(&tm, NULL); } return NULL; } static void *burn_disk(void *dummy) { int fd; char buffer[64*1024]; char filename[256]; strcpy(filename ,"/tmp/powertop.XXXXXX"); fd = mkstemp(filename); if (fd < 0) { printf(_("Cannot create temp file\n")); return NULL; } while (!stop_measurement) { lseek(fd, 0, SEEK_SET); write(fd, buffer, 64*1024); fdatasync(fd); } close(fd); return NULL; } static void cpu_calibration(int threads) { int i; pthread_t thr; printf(_("Calibrating: CPU usage on %i threads\n"), threads); stop_measurement = 0; for (i = 0; i < threads; i++) pthread_create(&thr, NULL, burn_cpu, NULL); one_measurement(15); stop_measurement = 1; sleep(1); } static void wakeup_calibration(unsigned long interval) { pthread_t thr; printf(_("Calibrating: CPU wakeup power consumption\n")); stop_measurement = 0; pthread_create(&thr, NULL, burn_cpu_wakeups, (void *)interval); one_measurement(15); stop_measurement = 1; sleep(1); } static void usb_calibration(void) { unsigned int i; /* chances are one of the USB devices is bluetooth; unrfkill first */ unrfkill_all_radios(); printf(_("Calibrating USB devices\n")); for (i = 0; i < usb_devices.size(); i++) { printf(_(".... device %s \n"), usb_devices[i].c_str()); suspend_all_usb_devices(); write_sysfs(usb_devices[i], "on\n"); one_measurement(15); suspend_all_usb_devices(); sleep(3); } rfkill_all_radios(); sleep(4); } static void rfkill_calibration(void) { unsigned int i; printf(_("Calibrating radio devices\n")); for (i = 0; i < rfkill_devices.size(); i++) { printf(_(".... device %s \n"), rfkill_devices[i].c_str()); rfkill_all_radios(); write_sysfs(rfkill_devices[i], "0\n"); one_measurement(15); rfkill_all_radios(); sleep(3); } for (i = 0; i < rfkill_devices.size(); i++) { printf(_(".... 
device %s \n"), rfkill_devices[i].c_str()); unrfkill_all_radios(); write_sysfs(rfkill_devices[i], "1\n"); one_measurement(15); unrfkill_all_radios(); sleep(3); } rfkill_all_radios(); } static void backlight_calibration(void) { unsigned int i; printf(_("Calibrating backlight\n")); for (i = 0; i < backlight_devices.size(); i++) { char str[4096]; printf(_(".... device %s \n"), backlight_devices[i].c_str()); lower_backlight(); one_measurement(15); sprintf(str, "%i\n", blmax / 4); write_sysfs(backlight_devices[i], str); one_measurement(15); sprintf(str, "%i\n", blmax / 2); write_sysfs(backlight_devices[i], str); one_measurement(15); sprintf(str, "%i\n", 3 * blmax / 4 ); write_sysfs(backlight_devices[i], str); one_measurement(15); sprintf(str, "%i\n", blmax); write_sysfs(backlight_devices[i], str); one_measurement(15); lower_backlight(); sleep(1); } printf(_("Calibrating idle\n")); system("DISPLAY=:0 /usr/bin/xset dpms force off"); one_measurement(15); system("DISPLAY=:0 /usr/bin/xset dpms force on"); } static void idle_calibration(void) { printf(_("Calibrating idle\n")); system("DISPLAY=:0 /usr/bin/xset dpms force off"); one_measurement(15); system("DISPLAY=:0 /usr/bin/xset dpms force on"); } static void disk_calibration(void) { pthread_t thr; printf(_("Calibrating: disk usage \n")); set_scsi_link("min_power"); stop_measurement = 0; pthread_create(&thr, NULL, burn_disk, NULL); one_measurement(15); stop_measurement = 1; sleep(1); } void calibrate(void) { find_all_usb(); find_all_rfkill(); find_backlight(); find_scsi_link(); wireless_PS = get_wifi_power_saving("wlan0"); save_sysfs("/sys/module/snd_hda_intel/parameters/power_save"); cout << _("Starting PowerTOP power estimate calibration \n"); suspend_all_usb_devices(); rfkill_all_radios(); lower_backlight(); set_wifi_power_saving("wlan0", 1); sleep(4); idle_calibration(); disk_calibration(); backlight_calibration(); write_sysfs("/sys/module/snd_hda_intel/parameters/power_save", "1\n"); cpu_calibration(1); cpu_calibration(4); wakeup_calibration(10000); wakeup_calibration(100000); wakeup_calibration(1000000); set_wifi_power_saving("wlan0", 0); usb_calibration(); rfkill_calibration(); cout << _("Finishing PowerTOP power estimate calibration \n"); restore_all_sysfs(); learn_parameters(300, 1); printf(_("Parameters after calibration:\n")); dump_parameter_bundle(); save_parameters("saved_parameters.powertop"); save_all_results("saved_results.powertop"); }<|fim▁end|>
* 51 Franklin Street, Fifth Floor,
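The calibrate.cpp record snapshots every sysfs knob it touches (save_sysfs into a map) and writes the values back in restore_all_sysfs once calibration ends. The same save/restore idiom in Python, as a sketch (the path is one example knob from the record; writing sysfs requires root):

```python
saved = {}

def save_sysfs(path):
    try:
        with open(path) as f:
            saved[path] = f.readline().rstrip("\n")
    except OSError:
        pass  # knob absent on this machine, like the access() check in the C++ code

def restore_all_sysfs():
    for path, value in saved.items():
        try:
            with open(path, "w") as f:
                f.write(value)
        except OSError:
            pass

save_sysfs("/sys/module/snd_hda_intel/parameters/power_save")
restore_all_sysfs()
```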
<|file_name|>graphics.py<|end_file_name|><|fim▁begin|>from reportlab.lib.colors import black from reportlab.lib.units import cm from base import BAND_WIDTH, BAND_HEIGHT, Element class Graphic(Element): """Base graphic class""" visible = True stroke = True stroke_color = black stroke_width = 1 fill = False fill_color = black def __init__(self, **kwargs): for k,v in kwargs.items(): setattr(self, k, v) def set_rect(self, **kwargs): """This method will adapt the graphic element in a rect.""" self.left = kwargs.get('left', self.left) self.top = kwargs.get('top', self.top) if 'width' in kwargs: self.width = kwargs['width'] elif 'right' in kwargs: self.width = kwargs['right'] - self.left if 'height' in kwargs: self.height = kwargs['height'] elif 'bottom' in kwargs: self.height = kwargs['bottom'] - self.top class Rect(Graphic): """A simple rectangle""" pass class RoundRect(Rect): """A rectangle graphic element that is possible set its radius and have round corners""" radius = 0.5 class Fixed(Graphic): """A fixed graphic is base on right and bottom coordinates instead of width and height. It is just a reference class and shouldn't be used directly in reports.""" left = None top = None right = None bottom = None def set_rect(self, **kwargs): self.left = kwargs.get('left', self.left) self.top = kwargs.get('top', self.top) if 'right' in kwargs: self.right = kwargs['right'] elif 'width' in kwargs: self.right = kwargs['width'] + self.left if 'bottom' in kwargs: self.bottom = kwargs['bottom'] elif 'height' in kwargs: self.bottom = kwargs['height'] + self.top class Line(Fixed): """A simple line""" @property def height(self): return self.bottom - self.top @property def width(self): return self.right - self.left class Circle(Graphic): """A simple circle""" left_center = None top_center = None radius = None<|fim▁hole|>class Arc(Fixed): """A simple circle""" start_angle = 0 extent = 90 class Ellipse(Fixed): """A simple circle""" pass class Image(Graphic): """A image""" left = None top = None _width = None _height = None filename = None _image = None # PIL image object is stored here get_image = None # To be overrided def _get_image(self): """Uses Python Imaging Library to load an image and get its informations""" if self.get_image: self._image = self.get_image(self) if not self._image and self.filename: try: import Image as PILImage except ImportError: from PIL import Image as PILImage self._image = PILImage.open(self.filename) return self._image def _set_image(self, value): self._image = value image = property(_get_image, _set_image) def _get_height(self): ret = self._height or (self.image and self.image.size[1] or 0) return ret * 0.02*cm def _set_height(self, value): self._height = value height = property(_get_height, _set_height) def _get_width(self): ret = self._width or (self.image and self.image.size[0] or 0) return ret * 0.02*cm def _set_width(self, value): self._width = value width = property(_get_width, _set_width)<|fim▁end|>
<|file_name|>lamp.py<|end_file_name|><|fim▁begin|>""" LAMP """ import numpy as np tol = 1.e-6 # zero tolerance def pdist(x): """ Pairwise distance between pairs of objects TODO: find a fast function """ n, d = x.shape dist = np.zeros((n, n)) for i in range(n): for j in range(n): dist[i][j] = np.linalg.norm(x[i] - x[j]) return dist def project(x, xs, ys): """ Projection """ assert (type(x) is np.ndarray) and (type(xs) is np.ndarray) and (type(ys) is np.ndarray), \ "*** ERROR (Force-Scheme): project input must be numpy.array type." ninst, dim = x.shape # number of instances, data dimension k, a = xs.shape # number of sample instances p = ys.shape[1] # visual space dimension assert dim == a, "*** LAMP Error: x and xs dimensions must be equal." Y = np.zeros((ninst, p)) for pt in range(ninst): # computes alphas alpha = np.zeros(k) for i in range(k): # verify if the point to be projected is a control point # avoids division by zero if np.linalg.norm(xs[i] - x[pt]) < tol: alpha[i] = np.finfo(float).max else: alpha[i] = 1 / np.linalg.norm(xs[i] - x[pt])**2 # computes x~ and y~ (eq 3) xtilde = np.zeros(dim)<|fim▁hole|>
ytilde = np.zeros(p)
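The hole in the lamp.py record sits inside LAMP's per-point loop: weighted centroids (eq. 3), the weighted matrices A and B (eq. 6), an orthogonal map from the SVD of AᵀB (eq. 7), and the final projection (eq. 8). A condensed NumPy sketch of that mapping for a single point, using the thin SVD in place of the record's zero-padded V matrix:

```python
import numpy as np

def lamp_point(x, xs, ys, eps=1e-6):
    d2 = np.sum((xs - x) ** 2, axis=1)
    alpha = 1.0 / np.maximum(d2, eps)              # eq. 3 weights
    xtilde = alpha @ xs / alpha.sum()              # weighted centroid, data space
    ytilde = alpha @ ys / alpha.sum()              # weighted centroid, visual space
    A = np.sqrt(alpha)[:, None] * (xs - xtilde)    # eq. 6
    B = np.sqrt(alpha)[:, None] * (ys - ytilde)
    U, _, Vh = np.linalg.svd(A.T @ B, full_matrices=False)
    M = U @ Vh                                     # eq. 7, orthogonal map
    return (x - xtilde) @ M + ytilde               # eq. 8

xs = np.random.rand(5, 4); ys = np.random.rand(5, 2)
print(lamp_point(np.random.rand(4), xs, ys))
```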
<|file_name|>issue-20186.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license<|fim▁hole|>// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. struct Foo; impl Foo { fn putc(&self, b: u8) { } fn puts(&self, s: &str) { for byte in s.bytes() { self.putc(byte) } } } fn main() {}<|fim▁end|>
<|file_name|>registerer_test.go<|end_file_name|><|fim▁begin|>// Copyright 2020 Anapaya Systems // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package grpc_test import ( "context" "net" "testing" "time" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" "github.com/scionproto/scion/go/cs/beaconing" "github.com/scionproto/scion/go/cs/beaconing/mock_beaconing" "github.com/scionproto/scion/go/lib/ctrl/seg" "github.com/scionproto/scion/go/lib/slayers/path" "github.com/scionproto/scion/go/lib/xtest" "github.com/scionproto/scion/go/lib/xtest/graph" "github.com/scionproto/scion/go/pkg/hiddenpath" hpgrpc "github.com/scionproto/scion/go/pkg/hiddenpath/grpc" "github.com/scionproto/scion/go/pkg/hiddenpath/grpc/mock_grpc" cryptopb "github.com/scionproto/scion/go/pkg/proto/crypto" "github.com/scionproto/scion/go/pkg/proto/hidden_segment" hspb "github.com/scionproto/scion/go/pkg/proto/hidden_segment" "github.com/scionproto/scion/go/pkg/proto/hidden_segment/mock_hidden_segment" ) func TestRegistererRegisterSegment(t *testing.T) { testCases := map[string]struct { input hiddenpath.SegmentRegistration<|fim▁hole|> signer func(ctrl *gomock.Controller) hpgrpc.Signer regular func(*gomock.Controller) beaconing.RPC assertErr assert.ErrorAssertionFunc }{ "valid hidden": { hpServer: func(c *gomock.Controller) hspb.HiddenSegmentRegistrationServiceServer { s := mock_hidden_segment.NewMockHiddenSegmentRegistrationServiceServer(c) s.EXPECT().HiddenSegmentRegistration(gomock.Any(), gomock.Any()). Return(&hidden_segment.HiddenSegmentRegistrationResponse{}, nil) return s }, signer: func(ctrl *gomock.Controller) hpgrpc.Signer { signer := mock_grpc.NewMockSigner(ctrl) signer.EXPECT().Sign(gomock.Any(), gomock.Any(), gomock.Any()). Return(&cryptopb.SignedMessage{}, nil) return signer }, regular: func(ctrl *gomock.Controller) beaconing.RPC { r := mock_beaconing.NewMockRPC(ctrl) r.EXPECT().RegisterSegment(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) return r }, input: hiddenpath.SegmentRegistration{ GroupID: hiddenpath.GroupID{Suffix: 42}, Seg: createSeg(), }, assertErr: assert.NoError, }, "valid public": { hpServer: func(c *gomock.Controller) hspb.HiddenSegmentRegistrationServiceServer { s := mock_hidden_segment.NewMockHiddenSegmentRegistrationServiceServer(c) s.EXPECT().HiddenSegmentRegistration(gomock.Any(), gomock.Any()). 
Return(&hidden_segment.HiddenSegmentRegistrationResponse{}, nil).Times(0) return s }, signer: func(ctrl *gomock.Controller) hpgrpc.Signer { return mock_grpc.NewMockSigner(ctrl) }, regular: func(ctrl *gomock.Controller) beaconing.RPC { r := mock_beaconing.NewMockRPC(ctrl) r.EXPECT().RegisterSegment(gomock.Any(), gomock.Any(), gomock.Any()).Times(1) return r }, input: hiddenpath.SegmentRegistration{ Seg: createSeg(), }, assertErr: assert.NoError, }, } for name, tc := range testCases { name, tc := name, tc t.Run(name, func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) defer ctrl.Finish() svc := xtest.NewGRPCService() hspb.RegisterHiddenSegmentRegistrationServiceServer(svc.Server(), tc.hpServer(ctrl)) svc.Start(t) s := hpgrpc.Registerer{ Dialer: svc, RegularRegistration: tc.regular(ctrl), Signer: tc.signer(ctrl), } ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() err := s.RegisterSegment(ctx, tc.input, &net.UDPAddr{}) tc.assertErr(t, err) }) } } func createSeg() seg.Meta { asEntry := seg.ASEntry{ Local: xtest.MustParseIA("1-ff00:0:110"), HopEntry: seg.HopEntry{ HopField: seg.HopField{MAC: [path.MacLen]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11}}, }, } ps, _ := seg.CreateSegment(time.Now(), 1337) ps.AddASEntry(context.Background(), asEntry, graph.NewSigner()) return seg.Meta{Type: seg.TypeDown, Segment: ps} }<|fim▁end|>
hpServer func(*gomock.Controller) hidden_segment.HiddenSegmentRegistrationServiceServer
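The registerer_test.go record is a table-driven test: each named case wires mock servers, a signer, and an error assertion, then exercises RegisterSegment. The same table-driven structure reduced to a Python sketch with unittest.mock (the routing function is a stand-in, not the SCION API):

```python
from unittest import mock

def register_segment(rpc, registration):
    # Stand-in for the code under test: hidden segments go to the hidden-path
    # service, everything else to the regular registration RPC.
    if registration.get("group_id"):
        return rpc.hidden_register(registration)
    return rpc.public_register(registration)

cases = {
    "valid hidden": ({"group_id": 42}, "hidden_register"),
    "valid public": ({}, "public_register"),
}

for name, (registration, expected) in cases.items():
    rpc = mock.Mock()
    register_segment(rpc, registration)
    getattr(rpc, expected).assert_called_once_with(registration)
    print(name, "ok")
```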
<|file_name|>service-32.service.spec.ts<|end_file_name|><|fim▁begin|>/** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license<|fim▁hole|>import { TestBed, inject } from '@angular/core/testing'; import { Service32Service } from './service-32.service'; describe('Service32Service', () => { beforeEach(() => { TestBed.configureTestingModule({ providers: [Service32Service] }); }); it('should ...', inject([Service32Service], (service: Service32Service) => { expect(service).toBeTruthy(); })); });<|fim▁end|>
*/
<|file_name|>compression_op.py<|end_file_name|><|fim▁begin|># coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Matrix compression operator. Helper functions to have an automated process to take any matrix compression algorithm and create a tensorflow operator that can be applied on a tensorflow matrix variable to compress it on the fly during training. The class MatrixCompressorInferface can be used to implement any matrix compression algorithm in the method static_matrix_compressor. The other class CompressionOpInterface is used to create a tensorflow operator that injects any matrix compression method dynamically into a tensorflow layer. This is done by specifying in the spec during initialization a MatrixCompressorInferface object that implements the method. The get_apply_compression_op return such a tensorflow operator. Further a tensorflow operator to update variables needs to be invoked periodically depending on the method. Such an operator is created using the get_update_op method. Derived classes of these interfaces can be used to create compression OPs that implement different compression methods. Such OPs have been implemented using derived classes such as LowRankDecompMatrixCompressor, CompressionOp for low rank decomposition, SimhashMatrixCompressor, SimhashCompressionOp for simhash, DLMatrixCompressor for dictionary learning. """ import copy from absl import logging import numpy as np from tensor2tensor.utils.hparam import HParams import tensorflow.compat.v2 as tf class MatrixCompressorInferface(object): """Interface for any matrix compressor algorithm. This MatrixCompressorInferface class can be implemented by any third party to implement any compression algorithm. """ def __init__(self, spec): pass def static_matrix_compressor(self, a_matrix): """Implements the matrix compression algorithm of choice to compress. Args: a_matrix: input matrix. Returns: The factor(s) or any compressed representation of a_matrix. """ raise NotImplementedError() def default_matrix(self): """Returns default matrix for initialization. Size is taken from spec. """ raise NotImplementedError() class LowRankDecompMatrixCompressor(MatrixCompressorInferface): """Low rank decomposition compressor. Implements matrix compression interface for the low rank decomposition algorithm. """ def __init__(self, spec): """Initializer. Args: spec: hparams object with default value given by self.get_default_hparams(). """ super(LowRankDecompMatrixCompressor, self).__init__(spec) self._spec = spec self.uncompressed_size = 0 self.compressed_size = 0 def get_spec(self): return self._spec @staticmethod def get_default_hparams(): """Get a tf.HParams object with the default values for the hyperparameters. name: string name of the low-rank matrix decompressor specification. rank: integer rank of the low-rank decomposition that is performed. compressor_option: integer indicates what type of factorization (if any) is used.<|fim▁hole|> trained. 
is_c_matrix_trainable: bool indicates whether the c_matrix matrix in the factorization is to be trained. Returns: tf.HParams object initialized to default values. """ return HParams( name='model_compression', rank=100, num_rows=10, num_cols=10, use_tpu=False, compressor_option=0, is_b_matrix_trainable=True, is_c_matrix_trainable=True, is_c_matrix_present=True, block_size=1, pruning_fraction=0.0, use_lsh=False) def static_matrix_compressor(self, a_matrix): """Low-rank decomposition of a_matrix. Args: a_matrix: input matrix. Returns: A list [b_matrix,c_matrix] which is the low-rank decomposition of a_matrix. Rank is taken from spec.rank. """ u, s, vh = np.linalg.svd(a_matrix) # If matrix dimension is smaller than rank specified then adjust rank rank = max(min(np.min(a_matrix.shape), self._spec.rank), 1) # rank = comp_op_utils.compute_compressed_rank_from_matrix_shape( # a_matrix.shape, self._spec.rank) b_matrix = u[:, :rank] c_matrix = vh[:rank, :] s_mat = np.diag(np.sqrt(s[:rank])) b_matrix = np.matmul(b_matrix, s_mat) c_matrix = np.matmul(s_mat, c_matrix) logging.info( 'Inside static_matrix_compressor: a_matrix,b_matrix,c_matrix shapes ' 'are: %s, %s, %s', a_matrix.shape, b_matrix.shape, c_matrix.shape) self.uncompressed_size = tf.size(a_matrix) self.compressed_size = b_matrix.size + c_matrix.size return [b_matrix, c_matrix] class CompressionOpInterface(object): """Interface for a compression op. Class to take a matrix compression algorithm and create a tensorflow compression operator to inject that compression dynamically during training. The compression algorithm is specified using an object of MatrixCompressorInferface class. """ def __init__(self, scope='default_scope', spec=None, global_step=None): pass def get_apply_compression_op(self, a_matrix_tfvar, matrix_compressor, scope='default_scope'): """Returns compressed tensorflow operator. Does it for variable a_matrix_tfvar for compression method specified in matrix_compressor. Args: a_matrix_tfvar: TF variable representing a tensor variable in a model. matrix_compressor: MatrixCompressorInferface object to specify the compression algorithm. scope: TF scope used for creating new TF variables. Returns: A TF node that has the compressed version of a_matrix_tfvar. """ raise NotImplementedError() def get_update_op(self): """Update operator. Returns: TF operator that implements the update steps that may need to be applied periodically. """ raise NotImplementedError() class CompressionOp(CompressionOpInterface): """Implements a compression OP. Does this based on any matrix factorization compression algorithm by replacing a variable a_matrix by alpha*a_matrix + (1-alpha)b_matrix*c_matrix. See the doc linked in the directory README for details. """ def __init__(self, scope='default_scope', spec=None, global_step=None, layer=None): """Initializer. Args: scope: TF scope used for creating new TF variables. spec: compression hyper parameters default value given by self.get_default_hparams(). global_step: tf variable that has the global step. layer: Layer to compress. """ super(CompressionOp, self).__init__(scope, spec, global_step) # Compression specification self._spec = spec # Sanity check for compression hparams self._validate_spec() self._global_step = global_step # public member variables to track the compressor, the variables and # other tf nodes corresponding to this OP. 
self.matrix_compressor = None self.a_matrix_tfvar = None self.b_matrix_tfvar = None self.c_matrix_tfvar = None self.alpha = None self.layer = layer self.last_alpha_update_step = None self.uncompressed_size = 0 self.compressed_size = 0 @staticmethod def get_default_hparams(): """Get a tf.HParams object with the default values for the hyperparameters. name: string name of the compression specification. Used for adding summaries and ops under a common tensorflow name_scope. alpha_decrement_value: float a positive real number by which alpha is decremented at each update. begin_compression_step: integer the global step at which to begin compression. end_compression_step: integer the global step at which to terminate compression. Defaults to -1 implying that compression continues till the training stops. use_tpu: False indicates whether to use TPU. compression_option: integer indicates what type of factorization (if any) is used. rank: integer indicates what type of factorization (if any) is used. update_option: integer indicates how the update logic is being run. More specifically: 0 - run the update logic in TF; needed when using GPU/TPU. 1 - run the update logic in regular python as opposed to TF. 2 - run the update logic in TF and in regular python. Returns: tf.HParams object initialized to default values. """ return HParams( name='model_compression', alpha_decrement_value=0.01, begin_compression_step=0, end_compression_step=-1, compression_frequency=10, use_tpu=False, compression_option=0, rank=100, update_option=0, run_update_interval_check=1, block_size=1, pruning_fraction=0.0, begin_pruning_step=0, end_pruning_step=-1, weight_sparsity_map=[''], block_dims_map=[''], threshold_decay=0.0, pruning_frequency=10, nbins=256, block_height=1, block_width=1, block_pooling_function='AVG', initial_sparsity=0.0, target_sparsity=0.5, sparsity_function_begin_step=0, sparsity_function_end_step=100, sparsity_function_exponent=3.0, gradient_decay_rate=0.99, prune_option='weight') def setup_variables(self, a_matrix_tfvar, matrix_compressor, layer): """Create compressed layer weight matrices.""" self.matrix_compressor = matrix_compressor a_matrix = np.zeros(shape=a_matrix_tfvar.shape) [b_matrix, c_matrix] = matrix_compressor.static_matrix_compressor(a_matrix) self.b_matrix_tfvar = layer.add_weight( 'b_matrix', shape=b_matrix.shape, initializer=layer.kernel_initializer, regularizer=layer.kernel_regularizer, constraint=layer.kernel_constraint, dtype=layer.dtype, trainable=True) self.c_matrix_tfvar = layer.add_weight( 'c_matrix', shape=c_matrix.shape, initializer=layer.kernel_initializer, regularizer=layer.kernel_regularizer, constraint=layer.kernel_constraint, dtype=layer.dtype, trainable=True) self.alpha = layer.add_weight( 'alpha', shape=(), initializer=tf.keras.initializers.Ones(), dtype=layer.dtype, trainable=False) self.last_alpha_update_step = layer.add_weight( 'last_alpha_update_step', shape=(), initializer=tf.keras.initializers.Constant(value=-1), dtype=tf.int32, trainable=False) self.a_matrix_tfvar = a_matrix_tfvar self.layer.alpha = self.alpha def compressed_matmul_keras(self, inputs, training=False): """Matmul with a convex combination of original and compressed weights.""" if training: compressed_mat = self.alpha * self.a_matrix_tfvar + ( 1 - self.alpha) * tf.matmul(self.b_matrix_tfvar, self.c_matrix_tfvar) return tf.matmul(inputs, compressed_mat) else: # This prevents the TFLite converter from constant-folding the product of # B & C matrices. 
intermediate = tf.matmul(inputs, self.b_matrix_tfvar) return tf.matmul(intermediate, self.c_matrix_tfvar) def maybe_run_update_step(self): """Creates TensorFlow update op for compression.""" def maybe_update_alpha(): """Maybe update the alpha param. Checks if global_step is between begin_compression_step and end_compression_step, and if the current training step is a compression step. Returns: Boolean tensor whether the training step is a compression step. """ is_step_within_compression_range = tf.logical_and( tf.greater_equal( tf.cast(self._global_step, tf.int32), self._spec.begin_compression_step), tf.logical_or( tf.less_equal( tf.cast(self._global_step, tf.int32), self._spec.end_compression_step), tf.less(self._spec.end_compression_step, 0))) is_compression_step = tf.less_equal( tf.add(self.last_alpha_update_step, self._spec.compression_frequency), tf.cast(self._global_step, tf.int32)) return tf.logical_and(is_step_within_compression_range, is_compression_step) def no_update_op(): pass def compressor_and_alpha_update_op_fn(): return self._compressor_and_alpha_update_op() tf.cond( pred=maybe_update_alpha(), true_fn=compressor_and_alpha_update_op_fn, false_fn=no_update_op) return def _compressor_op(self, matrix_compressor, a_matrix_tfvar): """Creates compressor op based on matrix_compressor. Meant to create the factors once at begin_compression_step. Args: matrix_compressor: specifies the matrix compressor object. a_matrix_tfvar: the tf tensor to be compressed. """ [b_matrix_out, c_matrix_out ] = tf.compat.v1.py_function(matrix_compressor.static_matrix_compressor, [a_matrix_tfvar], [tf.float32, tf.float32]) self.b_matrix_tfvar.assign(b_matrix_out) self.c_matrix_tfvar.assign(c_matrix_out) return def _update_alpha_op(self): self.alpha.assign_sub(self._spec.alpha_decrement_value, 0) self.alpha.assign(tf.math.maximum(self.alpha, 0)) return def _compressor_and_alpha_update_op(self): """Applies compressor and also updates alpha.""" self._compressor_op(self.matrix_compressor, self.a_matrix_tfvar) self._update_alpha_op() self.last_alpha_update_step.assign(tf.cast(self._global_step, tf.int32)) def _validate_spec(self): spec = self._spec if spec.begin_compression_step < 0: raise ValueError('Illegal value for begin_compression_step') if spec.begin_compression_step >= spec.end_compression_step: if spec.end_compression_step != -1: raise ValueError( 'Compression must begin before it can end. begin_step=%d, ' 'end_step=%d. Set end_compression_step to -1 if compression is ' 'required till training stops' % (spec.begin_compression_step, spec.end_compression_step)) class ApplyCompression(object): """Wrapper class. This is to repeatedly invoke above compression operator to different layers in a model. Intialized by specifying the compressor and compression_spec. After that apply_compression can be called several times for different matrices in the model. Finally all_update_op returns the combined update OP from all these compressions. """ def __init__(self, scope, compression_spec, compressor, global_step=None): """Initializer. Args: scope: TF scope used for creating new TF variables. compression_spec: compression hyper parameters. compressor: matrix compressor object of class MatrixCompressorInferface. global_step: tf variable that has the global step. 
""" logging.info('Entering ApplyCompression constructor') self._compression_op_spec = compression_spec self._scope = scope self._global_step = global_step self._matrix_compressor = compressor self._compression_ops = [] self._update_ops = [] self._all_update_op = None self.uncompressed_size = 0 self.compressed_size = 0 def apply_compression_keras(self, a_matrix_tfvar, scope='default_scope', layer=None): """keras version of apply_compression. Applies matrix compression OP on a_matrix_tfvar as specified in spec. Args: a_matrix_tfvar: TF variable representing a tensor variable in a model. scope: TF scope used for creating new TF variables. layer: keras layer object calling this function. Must support an add_weight method. Returns: TF node that represents the compressed version of a_matrix_tfvar. """ if self._compression_op_spec.compression_option == 9: raise NotImplementedError('InputCompression not Supported.') else: c = CompressionOp( scope=scope, spec=self._compression_op_spec, global_step=self._global_step, layer=layer) c.setup_variables(a_matrix_tfvar, self._matrix_compressor, layer=layer) return c def get_operator_hparam(self, hparam): """Returns the value of queried hparam of the compression operator.""" return self._compression_op_spec.get(hparam) def get_compression_ops(self): """Returns the compression operators used during the update steps. Returns: A list of CompressionOp objects. """ return copy.copy(self._compression_ops) def get_spec(self): """Get the spec / hparams used to create the Pruning object.""" return self._compression_op_spec<|fim▁end|>
is_b_matrix_trainable: bool indicates whether the b_matrix matrix in the factorization is to be
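static_matrix_compressor in the compression_op.py record factors A ≈ B·C by truncating the SVD at the requested rank and splitting √s between the two factors. The same computation as a standalone NumPy sketch:

```python
import numpy as np

def low_rank_factors(a, rank):
    u, s, vh = np.linalg.svd(a, full_matrices=False)
    r = max(min(min(a.shape), rank), 1)   # clamp rank to the matrix size
    s_sqrt = np.diag(np.sqrt(s[:r]))
    b = u[:, :r] @ s_sqrt                 # left factor absorbs sqrt(s)
    c = s_sqrt @ vh[:r, :]                # right factor absorbs sqrt(s)
    return b, c

a = np.random.rand(8, 6)
b, c = low_rank_factors(a, rank=3)
print(np.linalg.norm(a - b @ c))          # reconstruction error at rank 3
```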
<|file_name|>weibos.py<|end_file_name|><|fim▁begin|># coding: utf-8 from leancloud import Object from leancloud import Query from leancloud import LeanCloudError from flask import Blueprint from flask import request from flask import redirect from flask import url_for from flask import render_template import sys sys.path.insert(0,'../') from utils import JsonDict import logging import json class Weibo(Object): pass weibos_handler = Blueprint('weibos', __name__) @weibos_handler.route('', methods=['GET']) def show(): try: weibos = Query(Weibo).descending('createdAt').find() except LeanCloudError, e: # The Weibo class does not exist on the server yet if e.code == 101: weibos = [] else: raise e return render_template('weibos.html', weibos=weibos) """ try: todos = Query(Todo).descending('createdAt').find() except LeanCloudError, e: if e.code == 101: # The corresponding class has not been created on the server yet todos = []<|fim▁hole|>
else: raise e return render_template('todos.html', todos=todos) """ @weibos_handler.route('', methods=['POST']) def add(): # Get the Weibo posts from one page of search results weibos = request.json['weibos'] # Save these Weibo posts to LeanCloud new_mid_list = [] for _weibo in weibos: _weibo = JsonDict(_weibo) # Check whether this Weibo post has already been saved _weibo_is_saved = len(Query(Weibo).equal_to('mid',_weibo.mid).find()) > 0 if not _weibo_is_saved: # Parse it into a LeanCloud object weibo = Weibo(mid=_weibo.mid, nickname=_weibo.user_nick_name, timestamp = _weibo.timestamp, topic = _weibo.topic, pics = _weibo.pics) weibo.save() new_mid_list.append(_weibo.mid) return u'Topic #%s# gained %s new Weibo posts: %s' % (_weibo.topic, len(new_mid_list), ",".join(new_mid_list)) """ todo = Todo(content=content) todo.save() return redirect(url_for('todos.show')) """<|fim▁end|>
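add() in the weibos.py record checks each post's mid against the store before saving, so repeated syncs of the same search page do not create duplicates. The dedupe-by-key pattern, reduced to a sketch without the LeanCloud client:

```python
seen_mids = set()  # stand-in for the Query(Weibo).equal_to('mid', ...) lookup

def save_new_posts(posts):
    new_ids = []
    for post in posts:
        if post["mid"] in seen_mids:
            continue              # already stored, as in the original check
        seen_mids.add(post["mid"])
        new_ids.append(post["mid"])
    return new_ids

print(save_new_posts([{"mid": "1"}, {"mid": "1"}, {"mid": "2"}]))  # ['1', '2']
```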
<|file_name|>screen.rs<|end_file_name|><|fim▁begin|>use crate::portal::PortalRef; use alloc::sync::Weak; use cairo::bindings::{CAIRO_FORMAT_A8, CAIRO_FORMAT_ARGB32, CAIRO_FORMAT_RGB24}; use cairo::{Cairo, Surface}; use core::mem; use graphics_base::frame_buffer::{AsSurface, AsSurfaceMut, FrameBuffer}; use graphics_base::types::{EventInput, MouseButton, MouseInputInfo, Rect}; use graphics_base::{Error, Result}; use jpeg_decoder::{Decoder, ImageInfo, PixelFormat}; const CURSOR_WIDTH: f64 = 32.0; const CURSOR_HEIGHT: f64 = 32.0; const CURSOR_HOTSPOT_X: f64 = 12.0; const CURSOR_HOTSPOT_Y: f64 = 8.0; pub struct ScreenBuffer { pub pos: Rect, pub frame_buffer_size: (u16, u16), pub frame_buffer: Weak<FrameBuffer>, pub portal_ref: PortalRef, } pub struct InputCapture { button: MouseButton, pub pos: Rect, pub portal_ref: PortalRef, } fn to_sprite(hotspot: (u16, u16)) -> (f64, f64) { (hotspot.0 as f64 - CURSOR_HOTSPOT_X, hotspot.1 as f64 - CURSOR_HOTSPOT_Y) } fn surface_from_jpeg_slice(data: &[u8]) -> Result<Surface<'static>> { let mut decoder = Decoder::new(data); let data = decoder.decode().map_err(|_| Error::NotSupported)?; let ImageInfo { width, height, pixel_format, } = decoder.info().unwrap(); let (data, format) = match pixel_format { PixelFormat::L8 => (data, CAIRO_FORMAT_A8),<|fim▁hole|> PixelFormat::RGB24 => { let mut data32 = Vec::with_capacity(width as usize * height as usize * 4); for chunk in data.chunks_exact(3) { data32.extend(chunk.iter().rev()); data32.push(0); } (data32, CAIRO_FORMAT_RGB24) } PixelFormat::CMYK32 => panic!("CMYK not supported"), }; Ok(Surface::from_vec(data, format, width, height)) } pub struct Screen<S> { cursor_hotspot: (u16, u16), cursor_sprite: (f64, f64), buttons: [bool; 3], screen_size: (u16, u16), lfb: S, cursor: Surface<'static>, wallpaper: Surface<'static>, pub buffers: Vec<ScreenBuffer>, pub input_capture: Option<InputCapture>, } unsafe impl<S> Send for Screen<S> {} impl<S> Screen<S> where S: AsSurfaceMut, { pub fn new(screen_size: (u16, u16), lfb: S) -> Self { static CURSOR_BYTES: &'static [u8] = include_bytes!("icons8-cursor-32.png"); static WALLPAPER_BYTES: &'static [u8] = include_bytes!("wallpaper.jpg"); let cursor = Surface::from_png_slice(CURSOR_BYTES).unwrap(); let cursor_hotspot = (screen_size.0 / 2, screen_size.1 / 2); let wallpaper = surface_from_jpeg_slice(WALLPAPER_BYTES).unwrap(); Self { cursor_hotspot, cursor_sprite: to_sprite(cursor_hotspot), buttons: [false; 3], screen_size, lfb, cursor, wallpaper, buffers: Vec::new(), input_capture: None, } } fn draw_buffers(cr: &Cairo, screen_size: (u16, u16), wallpaper: &Surface, buffers: &[ScreenBuffer]) { cr.new_path() .move_to(0.0, 0.0) .rel_line_to(0.0, screen_size.1 as f64) .rel_line_to(screen_size.0 as f64, 0.0) .rel_line_to(0.0, -(screen_size.1 as f64)) .rel_line_to(-(screen_size.0 as f64), 0.0); for buffer in buffers { let ScreenBuffer { pos: Rect { x, y, width, height }, frame_buffer_size, ref frame_buffer, .. } = *buffer; if let Some(frame_buffer) = frame_buffer.upgrade() { let surface = frame_buffer.as_surface(CAIRO_FORMAT_RGB24, frame_buffer_size); cr.set_source_surface(&surface, x, y).paint(); } cr.new_sub_path() .rectangle(x, y, width, height) .close_path() .clip_preserve(); } cr.set_source_surface(wallpaper, 0.0, 0.0).paint(); } fn find_portal(&self) -> Option<(Rect, PortalRef)> { let pos = self.cursor_hotspot; let x = pos.0 as f64; let y = pos.1 as f64; if let Some(InputCapture { pos, ref portal_ref, .. 
}) = self.input_capture { Some((pos, portal_ref.clone())) } else { for buffer in self.buffers.iter() { let ScreenBuffer { pos, ref portal_ref, .. } = *buffer; if pos.contains(x, y) { return Some((pos, portal_ref.clone())); } } None } } #[cfg(target_os = "rust_os")] pub fn update_mouse_state_delta(&mut self, dx: i16, dy: i16, dw: i8, buttons: [bool; 3]) -> Result<()> { let x = ((self.cursor_hotspot.0 as i32 + dx as i32).max(0) as u16).min(self.screen_size.0 - 1); let y = ((self.cursor_hotspot.1 as i32 + dy as i32).max(0) as u16).min(self.screen_size.1 - 1); self.update_mouse_state(x, y, dw, buttons) } pub fn update_mouse_state(&mut self, x: u16, y: u16, _dw: i8, buttons: [bool; 3]) -> Result<()> { let prev_cursor_hotspot = self.cursor_hotspot; let prev_cursor_sprite = self.cursor_sprite; let prev_buttons = self.buttons; self.cursor_hotspot = (x, y); self.cursor_sprite = to_sprite(self.cursor_hotspot); self.buttons = buttons; if let Some((pos, portal_ref)) = self.find_portal() { let screen_x = x as f64; let screen_y = y as f64; let x = screen_x - pos.x; let y = screen_y - pos.y; let info = MouseInputInfo { x, y, screen_x, screen_y, }; let mut inputs = Vec::new(); if prev_cursor_hotspot != self.cursor_hotspot { inputs.push(EventInput::MouseMove { info: info.clone() }); } for ((&prev_down, &down), &button) in prev_buttons .iter() .zip(self.buttons.iter()) .zip([MouseButton::Left, MouseButton::Middle, MouseButton::Right].iter()) { if !prev_down && down { inputs.push(EventInput::MouseButtonDown { info: info.clone(), button, }); if self.input_capture.is_none() { self.input_capture = Some(InputCapture { button, pos, portal_ref: portal_ref.clone(), }); } } else if prev_down && !down { inputs.push(EventInput::MouseButtonUp { info: info.clone(), button, }); if let Some(InputCapture { button: prev_button, .. }) = self.input_capture { if prev_button == button { self.input_capture = None; } } } } for input in inputs { portal_ref.send_input(input)?; } } let cr = self .lfb .as_surface_mut(CAIRO_FORMAT_ARGB32, self.screen_size) .into_cairo(); cr.rectangle(prev_cursor_sprite.0, prev_cursor_sprite.1, CURSOR_WIDTH, CURSOR_HEIGHT) .clip(); Self::draw_buffers(&cr, self.screen_size, &self.wallpaper, &self.buffers); cr.reset_clip() .set_source_surface(&self.cursor, self.cursor_sprite.0, self.cursor_sprite.1) .paint(); Ok(()) } pub fn update_buffers<I>(&mut self, buffers: I) where I: IntoIterator<Item = ScreenBuffer>, { let mut prev_input_capture = mem::replace(&mut self.input_capture, None); self.buffers.clear(); let buffers = buffers.into_iter(); if let (_, Some(capacity)) = buffers.size_hint() { self.buffers.reserve(capacity); } for buffer in buffers { prev_input_capture = prev_input_capture.and_then(|prev_input_capture| { if prev_input_capture.portal_ref == buffer.portal_ref { self.input_capture = Some(InputCapture { pos: buffer.pos, ..prev_input_capture }); None } else { Some(prev_input_capture) } }); self.buffers.push(buffer); } let cr = self .lfb .as_surface_mut(CAIRO_FORMAT_ARGB32, self.screen_size) .into_cairo(); Self::draw_buffers(&cr, self.screen_size, &self.wallpaper, &self.buffers); cr.reset_clip() .set_source_surface(&self.cursor, self.cursor_sprite.0, self.cursor_sprite.1) .paint(); } }<|fim▁end|>
<|file_name|>models.py<|end_file_name|><|fim▁begin|>import datetime from backend import db from cruds.crud_user_type_destinations.models import UserTypeDestinations from cruds.crud_users.models import Users from cruds import format_urls_in_text class WallMessages(db.Model): id = db.Column(db.Integer, primary_key=True, autoincrement=True) date = db.Column(db.Integer) sender = db.Column(db.Integer, db.ForeignKey("users.id")) destination = db.Column(db.Integer, db.ForeignKey("user_type_destinations.id")) param_value = db.Column(db.Integer()) message = db.Column(db.Text()) def set_fields(self, fields): self.date = fields['date'] self.sender = fields['sender'] self.destination = fields['user_type_destination_id'] self.param_value = fields['parameter'] self.message = format_urls_in_text(fields['message']) def get_sender(self): return Users.query.filter_by(id=self.sender).all() def get_destinations(self): _dict = {}<|fim▁hole|> exec(query, _dict) return _dict['users']<|fim▁end|>
query = UserTypeDestinations.query.filter_by(id=self.destination).first().users_query query = str(query).replace('$', str(self.param_value))
<|file_name|>const.py<|end_file_name|><|fim▁begin|>"""Constants for Sonarr.""" DOMAIN = "sonarr" # Config Keys CONF_BASE_PATH = "base_path" CONF_DAYS = "days" CONF_INCLUDED = "include_paths" CONF_UNIT = "unit" CONF_UPCOMING_DAYS = "upcoming_days" CONF_WANTED_MAX_ITEMS = "wanted_max_items" # Data DATA_HOST_CONFIG = "host_config" DATA_SONARR = "sonarr"<|fim▁hole|>DEFAULT_UPCOMING_DAYS = 1 DEFAULT_VERIFY_SSL = False DEFAULT_WANTED_MAX_ITEMS = 50<|fim▁end|>
DATA_SYSTEM_STATUS = "system_status" # Defaults
<|file_name|>name.go<|end_file_name|><|fim▁begin|>package commands import cmds "github.com/ipfs/go-ipfs/commands" type IpnsEntry struct { Name string Value string } var NameCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "IPFS namespace (IPNS) tool", Synopsis: ` ipfs name publish [<name>] <ipfs-path> - Publish an object to IPNS ipfs name resolve [<name>] - Gets the value currently published at an IPNS name `, ShortDescription: ` IPNS is a PKI namespace, where names are the hashes of public keys, and the private key enables publishing new (signed) values. In both publish and resolve, the default value of <name> is your own identity public key. `, LongDescription: ` IPNS is a PKI namespace, where names are the hashes of public keys, and<|fim▁hole|>and resolve, the default value of <name> is your own identity public key. Examples: Publish a <ref> to your identity name: > ipfs name publish QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy published name QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n to QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy Publish a <ref> to another public key: > ipfs name publish QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy published name QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n to QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy Resolve the value of your identity: > ipfs name resolve QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy Resolve the value of another name: > ipfs name resolve QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy `, }, Subcommands: map[string]*cmds.Command{ "publish": publishCmd, "resolve": resolveCmd, }, }<|fim▁end|>
the private key enables publishing new (signed) values. In both publish
<|file_name|>rush.rs<|end_file_name|><|fim▁begin|>extern crate libc; ///! Rush main /// Rush module mod rush { use std::ptr; use std::io::stdio; use libc::funcs::posix88::unistd::execvp; use libc::funcs::posix88::unistd::fork; use libc::types::os::arch::posix88::pid_t; /// Wait. #[link(name = "c")] extern { fn wait (status: * mut i64) -> pid_t; } /// Print the prompt. fn print_prompt () { let mut stdout = stdio::stdout (); let _ = stdout.write_str ("rush >> "); let _ = stdout.flush (); } /// Read user's input into a buffer. fn read_input (buffer: & mut [u8]) -> uint { let mut stdin = stdio::stdin (); let n = match stdin.read (buffer) { Ok (n) => n, Err (..) => 0, }; n } /// Tokenize a buffer (split at spaces). fn buffer_to_tokens (n: uint, buffer: & mut [u8]) -> Vec<String> { let input_string = match String::from_utf8 (Vec::from_slice (buffer)) { Ok (s) => s, Err (e) => fail! ("{}", e), }; let tokens: Vec<&str> = input_string.as_slice().slice (0, n-1).split(' ').collect(); let mut tokens_strings = Vec::new (); for s in tokens.iter () { tokens_strings.push (String::from_str (*s)); } tokens_strings } /// Run a command. fn run_cmd (cmd: & str, args: Vec<String>) -> pid_t<|fim▁hole|> unsafe { let pid = fork (); if pid == 0 { let cmd_c_str = cmd.to_c_str (); let arg_0 = cmd_c_str.clone (); let mut args_c : Vec<* const i8> = Vec::new (); args_c.push (arg_0.unwrap ()); for s in args.iter () { args_c.push (s.to_c_str ().unwrap ()); } args_c.push (ptr::null ()); execvp (cmd_c_str.unwrap (), args_c.as_mut_ptr ()); } pid } } /// Run the prompt. pub fn prompt () { loop { print_prompt (); /* Read input. */ let mut buffer : [u8, ..2048] = [0, ..2048]; let n = read_input (buffer); let input = buffer_to_tokens (n, buffer); let cmd = input.as_slice ()[0].as_slice (); if cmd == "exit" { break; } let args = Vec::from_slice (input.as_slice ().slice (1, input.len ())); let pid = run_cmd (cmd, args); if pid == 0 { break; } let mut status: i64 = 0; unsafe { wait (&mut status); } } } } /// Main fn main () { rush::prompt (); }<|fim▁end|>
{
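The rush.rs row shows how small a hole can be: the completion is just the opening brace of run_cmd's body. The function itself is the classic POSIX fork/exec pattern, expressed above through libc bindings in pre-1.0 Rust. The same shape in Python's standard library, as a hedged sketch (POSIX-only: os.fork does not exist on Windows):

    import os

    def run_cmd(cmd, args):
        pid = os.fork()
        if pid == 0:                      # child: replace this process image
            os.execvp(cmd, [cmd] + args)  # returns only on error
        return pid                        # parent: caller waits on the child

    # The surrounding REPL loop would then do something like:
    #   pid = run_cmd("ls", ["-l"]); os.waitpid(pid, 0)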
<|file_name|>morestats.py<|end_file_name|><|fim▁begin|># Author: Travis Oliphant, 2002 # # Further updates and enhancements by many SciPy developers. # from __future__ import division, print_function, absolute_import import math import types import warnings from . import statlib from . import stats from .stats import find_repeats from . import distributions from numpy import isscalar, r_, log, sum, around, unique, asarray from numpy import zeros, arange, sort, amin, amax, any, where, \ atleast_1d, sqrt, ceil, floor, array, poly1d, compress, not_equal, \ pi, exp, ravel, angle import numpy as np import scipy.optimize as optimize from numpy.testing.decorators import setastest __all__ = ['mvsdist', 'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot', 'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot', 'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test', 'fligner', 'mood', 'oneway', 'wilcoxon', 'pdf_fromgamma', 'circmean', 'circvar', 'circstd', ] def bayes_mvs(data, alpha=0.90): """ Bayesian confidence intervals for the mean, var, and std. Parameters ---------- data : array_like Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`. Requires 2 or more data points. alpha : float, optional Probability that the returned confidence interval contains the true parameter. Returns ------- mean_cntr, var_cntr, std_cntr : tuple The three results are for the mean, variance and standard deviation, respectively. Each result is a tuple of the form:: (center, (lower, upper)) with `center` the mean of the conditional pdf of the value given the data, and `(lower, upper)` a confidence interval, centered on the median, containing the estimate to a probability `alpha`. Notes ----- Each tuple of mean, variance, and standard deviation estimates represent the (center, (lower, upper)) with center the mean of the conditional pdf of the value given the data and (lower, upper) is a confidence interval centered on the median, containing the estimate to a probability `alpha`. Converts data to 1-D and assumes all data has the same mean and variance. Uses Jeffrey's prior for variance and std. Equivalent to tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat)) References ---------- T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and standard-deviation from data", http://hdl.handle.net/1877/438, 2006. """ res = mvsdist(data) if alpha >= 1 or alpha <= 0: raise ValueError("0 < alpha < 1 is required, but alpha=%s was given." % alpha) return tuple((x.mean(), x.interval(alpha)) for x in res) def mvsdist(data): """ 'Frozen' distributions for mean, variance, and standard deviation of data. Parameters ---------- data : array_like Input array. Converted to 1-D using ravel. Requires 2 or more data-points. Returns ------- mdist : "frozen" distribution object Distribution object representing the mean of the data vdist : "frozen" distribution object Distribution object representing the variance of the data sdist : "frozen" distribution object Distribution object representing the standard deviation of the data Notes ----- The return values from bayes_mvs(data) is equivalent to ``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``. In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)`` on the three distribution objects returned from this function will give the same results that are returned from `bayes_mvs`. 
Examples -------- >>> from scipy.stats import mvsdist >>> data = [6, 9, 12, 7, 8, 8, 13] >>> mean, var, std = mvsdist(data) We now have frozen distribution objects "mean", "var" and "std" that we can examine: >>> mean.mean() 9.0 >>> mean.interval(0.95) (6.6120585482655692, 11.387941451734431) >>> mean.std() 1.1952286093343936 """ x = ravel(data) n = len(x) if (n < 2): raise ValueError("Need at least 2 data-points.") xbar = x.mean() C = x.var() if (n > 1000): # gaussian approximations for large n mdist = distributions.norm(loc=xbar, scale=math.sqrt(C/n)) sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C/(2.*n))) vdist = distributions.norm(loc=C, scale=math.sqrt(2.0/n)*C) else: nm1 = n-1 fac = n*C/2. val = nm1/2. mdist = distributions.t(nm1,loc=xbar,scale=math.sqrt(C/nm1)) sdist = distributions.gengamma(val,-2,scale=math.sqrt(fac)) vdist = distributions.invgamma(val,scale=fac) return mdist, vdist, sdist def kstat(data,n=2): """ Return the nth k-statistic (1<=n<=4 so far). The nth k-statistic is the unique symmetric unbiased estimator of the nth cumulant kappa_n. Parameters ---------- data : array_like Input array. n : int, {1, 2, 3, 4}, optional Default is equal to 2. Returns ------- kstat : float The nth k-statistic. See Also -------- kstatvar: Returns an unbiased estimator of the variance of the k-statistic. Notes ----- The cumulants are related to central moments but are specifically defined using a power series expansion of the logarithm of the characteristic function (which is the Fourier transform of the PDF). In particular let phi(t) be the characteristic function, then:: ln phi(t) = > kappa_n (it)^n / n! (sum from n=0 to inf) The first few cumulants (kappa_n) in terms of central moments (mu_n) are:: kappa_1 = mu_1 kappa_2 = mu_2 kappa_3 = mu_3 kappa_4 = mu_4 - 3*mu_2**2 kappa_5 = mu_5 - 10*mu_2 * mu_3 References ---------- http://mathworld.wolfram.com/k-Statistic.html http://mathworld.wolfram.com/Cumulant.html """ if n > 4 or n < 1: raise ValueError("k-statistics only supported for 1<=n<=4") n = int(n) S = zeros(n+1,'d') data = ravel(data) N = len(data) for k in range(1,n+1): S[k] = sum(data**k,axis=0) if n==1: return S[1]*1.0/N elif n==2: return (N*S[2]-S[1]**2.0)/(N*(N-1.0)) elif n==3: return (2*S[1]**3 - 3*N*S[1]*S[2]+N*N*S[3]) / (N*(N-1.0)*(N-2.0)) elif n==4: return (-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 - \ 4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) / \ (N*(N-1.0)*(N-2.0)*(N-3.0)) else: raise ValueError("Should not be here.") def kstatvar(data,n=2): """ Returns an unbiased estimator of the variance of the k-statistic. See `kstat` for more details of the k-statistic. Parameters ---------- data : array_like Input array. n : int, {1, 2}, optional Default is equal to 2. Returns ------- kstatvar : float The nth k-statistic variance. See Also -------- kstat """ data = ravel(data) N = len(data) if n == 1: return kstat(data,n=2)*1.0/N elif n == 2: k2 = kstat(data,n=2) k4 = kstat(data,n=4) return (2*k2*k2*N + (N-1)*k4)/(N*(N+1)) else: raise ValueError("Only n=1 or n=2 supported.") def probplot(x, sparams=(), dist='norm', fit=True, plot=None): """ Calculate quantiles for a probability plot of sample data against a specified theoretical distribution. `probplot` optionally calculates a best-fit line for the data and plots the results using Matplotlib or a given plot function. Parameters ---------- x : array_like Sample/response data from which `probplot` creates the plot. sparams : tuple, optional Distribution-specific shape parameters (location(s) and scale(s)). 
dist : str, optional Distribution function name. The default is 'norm' for a normal probability plot. fit : bool, optional Fit a least-squares regression (best-fit) line to the sample data if True (default). plot : object, optional If given, plots the quantiles and least squares fit. `plot` is an object with methods "plot", "title", "xlabel", "ylabel" and "text". The matplotlib.pyplot module or a Matplotlib axes object can be used, or a custom object with the same methods. By default, no plot is created. Returns ------- (osm, osr) : tuple of ndarrays Tuple of theoretical quantiles (osm, or order statistic medians) and ordered responses (osr). (slope, intercept, r) : tuple of floats, optional Tuple containing the result of the least-squares fit, if that is performed by `probplot`. `r` is the square root of the coefficient of determination. If ``fit=False`` and ``plot=None``, this tuple is not returned. Notes ----- Even if `plot` is given, the figure is not shown or saved by `probplot`; ``plot.show()`` or ``plot.savefig('figname.png')`` should be used after calling `probplot`. Examples -------- >>> import scipy.stats as stats >>> nsample = 100 >>> np.random.seed(7654321) A t distribution with small degrees of freedom: >>> ax1 = plt.subplot(221) >>> x = stats.t.rvs(3, size=nsample) >>> res = stats.probplot(x, plot=plt) A t distribution with larger degrees of freedom: >>> ax2 = plt.subplot(222) >>> x = stats.t.rvs(25, size=nsample) >>> res = stats.probplot(x, plot=plt) A mixture of 2 normal distributions with broadcasting: >>> ax3 = plt.subplot(223) >>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5], ... size=(nsample/2.,2)).ravel() >>> res = stats.probplot(x, plot=plt) A standard normal distribution: >>> ax4 = plt.subplot(224) >>> x = stats.norm.rvs(loc=0, scale=1, size=nsample) >>> res = stats.probplot(x, plot=plt) """ N = len(x) Ui = zeros(N) * 1.0 Ui[-1] = 0.5**(1.0 /N) Ui[0] = 1 - Ui[-1] i = arange(2, N) Ui[1:-1] = (i - 0.3175) / (N + 0.365) try: ppf_func = eval('distributions.%s.ppf' % dist) except AttributeError: raise ValueError("%s is not a valid distribution with a ppf." % dist) if sparams is None: sparams = () if isscalar(sparams): sparams = (sparams,) if not isinstance(sparams, tuple): sparams = tuple(sparams) """ res = inspect.getargspec(ppf_func) if not ('loc' == res[0][-2] and 'scale' == res[0][-1] and \ 0.0==res[-1][-2] and 1.0==res[-1][-1]): raise ValueError("Function has does not have default location " "and scale parameters\n that are 0.0 and 1.0 respectively.") if (len(sparams) < len(res[0])-len(res[-1])-1) or \ (len(sparams) > len(res[0])-3): raise ValueError("Incorrect number of shape parameters.") """ osm = ppf_func(Ui, *sparams) osr = sort(x) if fit or (plot is not None): # perform a linear fit. slope, intercept, r, prob, sterrest = stats.linregress(osm, osr) if plot is not None: plot.plot(osm, osr, 'o', osm, slope*osm + intercept) plot.title('Probability Plot') plot.xlabel('Quantiles') plot.ylabel('Ordered Values') xmin = amin(osm) xmax = amax(osm) ymin = amin(x) ymax = amax(x) posx = xmin + 0.70 * (xmax - xmin) posy = ymin + 0.01 * (ymax - ymin) plot.text(posx, posy, "r^2=%1.4f" % r) if fit: return (osm, osr), (slope, intercept, r) else: return osm, osr def ppcc_max(x, brack=(0.0,1.0), dist='tukeylambda'): """Returns the shape parameter that maximizes the probability plot correlation coefficient for the given data to a one-parameter family of distributions. 
See also ppcc_plot """ try: ppf_func = eval('distributions.%s.ppf'%dist) except AttributeError: raise ValueError("%s is not a valid distribution with a ppf." % dist) """ res = inspect.getargspec(ppf_func) if not ('loc' == res[0][-2] and 'scale' == res[0][-1] and \ 0.0==res[-1][-2] and 1.0==res[-1][-1]): raise ValueError("Function has does not have default location " "and scale parameters\n that are 0.0 and 1.0 respectively.") if (1 < len(res[0])-len(res[-1])-1) or \ (1 > len(res[0])-3): raise ValueError("Must be a one-parameter family.") """ N = len(x) # compute uniform median statistics Ui = zeros(N)*1.0 Ui[-1] = 0.5**(1.0/N) Ui[0] = 1-Ui[-1] i = arange(2,N) Ui[1:-1] = (i-0.3175)/(N+0.365) osr = sort(x) # this function computes the x-axis values of the probability plot # and computes a linear regression (including the correlation) # and returns 1-r so that a minimization function maximizes the # correlation def tempfunc(shape, mi, yvals, func): xvals = func(mi, shape) r, prob = stats.pearsonr(xvals, yvals) return 1-r return optimize.brent(tempfunc, brack=brack, args=(Ui, osr, ppf_func)) def ppcc_plot(x,a,b,dist='tukeylambda', plot=None, N=80): """Returns (shape, ppcc), and optionally plots shape vs. ppcc (probability plot correlation coefficient) as a function of shape parameter for a one-parameter family of distributions from shape value a to b. See also ppcc_max """ svals = r_[a:b:complex(N)] ppcc = svals*0.0 k=0 for sval in svals: r1,r2 = probplot(x,sval,dist=dist,fit=1) ppcc[k] = r2[-1] k += 1 if plot is not None: plot.plot(svals, ppcc, 'x') plot.title('(%s) PPCC Plot' % dist) plot.xlabel('Prob Plot Corr. Coef.')#,deltay=-0.01) plot.ylabel('Shape Values')#,deltax=-0.01) return svals, ppcc def boxcox_llf(lmb, data): """The boxcox log-likelihood function. """ N = len(data) y = boxcox(data,lmb) my = np.mean(y, axis=0) f = (lmb-1)*sum(log(data),axis=0) f -= N/2.0*log(sum((y-my)**2.0/N,axis=0)) return f def _boxcox_conf_interval(x, lmax, alpha): # Need to find the lambda for which # f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1 fac = 0.5*distributions.chi2.ppf(1-alpha,1) target = boxcox_llf(lmax,x)-fac def rootfunc(lmbda,data,target): return boxcox_llf(lmbda,data) - target # Find positive endpont newlm = lmax+0.5 N = 0 while (rootfunc(newlm,x,target) > 0.0) and (N < 500): newlm += 0.1 N +=1 if N == 500: raise RuntimeError("Could not find endpoint.") lmplus = optimize.brentq(rootfunc,lmax,newlm,args=(x,target)) newlm = lmax-0.5 N = 0 while (rootfunc(newlm,x,target) > 0.0) and (N < 500): newlm += 0.1 N +=1 if N == 500: raise RuntimeError("Could not find endpoint.") lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x,target)) return lmminus, lmplus def boxcox(x,lmbda=None,alpha=None): """ Return a positive dataset transformed by a Box-Cox power transformation. Parameters ---------- x : ndarray Input array. lmbda : {None, scalar}, optional If `lmbda` is not None, do the transformation for that value. If `lmbda` is None, find the lambda that maximizes the log-likelihood function and return it as the second output argument. alpha : {None, float}, optional If `alpha` is not None, return the ``100 * (1-alpha)%`` confidence interval for `lmbda` as the third output argument. If `alpha` is not None it must be between 0.0 and 1.0. Returns ------- boxcox : ndarray Box-Cox power transformed array. maxlog : float, optional If the `lmbda` parameter is None, the second returned argument is the lambda that maximizes the log-likelihood function. 
(min_ci, max_ci) : tuple of float, optional If `lmbda` parameter is None and `alpha` is not None, this returned tuple of floats represents the minimum and maximum confidence limits given `alpha`. """ if any(x < 0): raise ValueError("Data must be positive.") if lmbda is not None: # single transformation lmbda = lmbda*(x==x) y = where(lmbda == 0, log(x), (x**lmbda - 1)/lmbda) return y # Otherwise find the lmbda that maximizes the log-likelihood function. def tempfunc(lmb, data): # function to minimize return -boxcox_llf(lmb,data) lmax = optimize.brent(tempfunc, brack=(-2.0,2.0),args=(x,)) y = boxcox(x, lmax) if alpha is None: return y, lmax # Otherwise find confidence interval interval = _boxcox_conf_interval(x, lmax, alpha) return y, lmax, interval def boxcox_normmax(x,brack=(-1.0,1.0)): N = len(x) # compute uniform median statistics Ui = zeros(N)*1.0 Ui[-1] = 0.5**(1.0/N) Ui[0] = 1-Ui[-1] i = arange(2,N) Ui[1:-1] = (i-0.3175)/(N+0.365) # this function computes the x-axis values of the probability plot # and computes a linear regression (including the correlation) # and returns 1-r so that a minimization function maximizes the # correlation xvals = distributions.norm.ppf(Ui) def tempfunc(lmbda, xvals, samps): y = boxcox(samps,lmbda) yvals = sort(y) r, prob = stats.pearsonr(xvals, yvals) return 1-r return optimize.brent(tempfunc, brack=brack, args=(xvals, x)) def boxcox_normplot(x,la,lb,plot=None,N=80): svals = r_[la:lb:complex(N)] ppcc = svals*0.0 k = 0 for sval in svals: #JP: this doesn't use sval, creates constant ppcc, and horizontal line z = boxcox(x,sval) #JP: this was missing r1,r2 = probplot(z,dist='norm',fit=1) ppcc[k] = r2[-1] k +=1 if plot is not None: plot.plot(svals, ppcc, 'x') plot.title('Box-Cox Normality Plot') plot.xlabel('Prob Plot Corr. Coef.') plot.ylabel('Transformation parameter') return svals, ppcc def shapiro(x,a=None,reta=False): """ Perform the Shapiro-Wilk test for normality. The Shapiro-Wilk test tests the null hypothesis that the data was drawn from a normal distribution. Parameters ---------- x : array_like Array of sample data. a : array_like, optional Array of internal parameters used in the calculation. If these are not given, they will be computed internally. If x has length n, then a must have length n/2. reta : bool, optional Whether or not to return the internally computed a values. The default is False. Returns ------- W : float The test statistic. p-value : float The p-value for the hypothesis test. a : array_like, optional If `reta` is True, then these are the internally computed "a" values that may be passed into this function on future calls. See Also -------- anderson : The Anderson-Darling test for normality References ---------- .. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm """ N = len(x) if N < 3: raise ValueError("Data must be at least length 3.") if a is None: a = zeros(N,'f') init = 0 else: if len(a) != N//2: raise ValueError("len(a) must equal len(x)/2") init = 1 y = sort(x) a, w, pw, ifault = statlib.swilk(y, a[:N//2], init) if not ifault in [0,2]: warnings.warn(str(ifault)) if N > 5000: warnings.warn("p-value may not be accurate for N > 5000.") if reta: return w, pw, a else: return w, pw # Values from Stephens, M A, "EDF Statistics for Goodness of Fit and # Some Comparisons", Journal of he American Statistical # Association, Vol. 69, Issue 347, Sept. 
1974, pp 730-737 _Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092]) _Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957]) # From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution", # Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588. _Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038]) # From Stephens, M A, "Tests of Fit for the Logistic Distribution Based # on the Empirical Distribution Function.", Biometrika, # Vol. 66, Issue 3, Dec. 1979, pp 591-595. _Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010]) def anderson(x,dist='norm'): """ Anderson-Darling test for data coming from a particular distribution The Anderson-Darling test is a modification of the Kolmogorov- Smirnov test kstest_ for the null hypothesis that a sample is drawn from a population that follows a particular distribution. For the Anderson-Darling test, the critical values depend on which distribution is being tested against. This function works for normal, exponential, logistic, or Gumbel (Extreme Value Type I) distributions. Parameters ---------- x : array_like array of sample data dist : {'norm','expon','logistic','gumbel','extreme1'}, optional the type of distribution to test against. The default is 'norm' and 'extreme1' is a synonym for 'gumbel' Returns ------- A2 : float The Anderson-Darling test statistic critical : list The critical values for this distribution sig : list The significance levels for the corresponding critical values in percents. The function returns critical values for a differing set of significance levels depending on the distribution that is being tested against. Notes ----- Critical values provided are for the following significance levels: normal/exponenential 15%, 10%, 5%, 2.5%, 1% logistic 25%, 10%, 5%, 2.5%, 1%, 0.5% Gumbel 25%, 10%, 5%, 2.5%, 1% If A2 is larger than these critical values then for the corresponding significance level, the null hypothesis that the data come from the chosen distribution can be rejected. References ---------- .. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm .. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and Some Comparisons, Journal of the American Statistical Association, Vol. 69, pp. 730-737. .. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit Statistics with Unknown Parameters, Annals of Statistics, Vol. 4, pp. 357-369. .. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value Distribution, Biometrika, Vol. 64, pp. 583-588. .. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference to Tests for Exponentiality , Technical Report No. 262, Department of Statistics, Stanford University, Stanford, CA. .. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution Based on the Empirical Distribution Function, Biometrika, Vol. 66, pp. 591-595. 
""" if not dist in ['norm','expon','gumbel','extreme1','logistic']: raise ValueError("Invalid distribution; dist must be 'norm', " "'expon', 'gumbel', 'extreme1' or 'logistic'.") y = sort(x) xbar = np.mean(x, axis=0) N = len(y) if dist == 'norm': s = np.std(x, ddof=1, axis=0) w = (y-xbar)/s z = distributions.norm.cdf(w) sig = array([15,10,5,2.5,1]) critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N),3) elif dist == 'expon': w = y / xbar z = distributions.expon.cdf(w) sig = array([15,10,5,2.5,1]) critical = around(_Avals_expon / (1.0 + 0.6/N),3) elif dist == 'logistic': def rootfunc(ab,xj,N): a,b = ab tmp = (xj-a)/b tmp2 = exp(tmp) val = [sum(1.0/(1+tmp2),axis=0)-0.5*N, sum(tmp*(1.0-tmp2)/(1+tmp2),axis=0)+N] return array(val) sol0=array([xbar,np.std(x, ddof=1, axis=0)]) sol = optimize.fsolve(rootfunc,sol0,args=(x,N),xtol=1e-5) w = (y-sol[0])/sol[1] z = distributions.logistic.cdf(w) sig = array([25,10,5,2.5,1,0.5]) critical = around(_Avals_logistic / (1.0+0.25/N),3) else: # (dist == 'gumbel') or (dist == 'extreme1'): #the following is incorrect, see ticket:1097 ## def fixedsolve(th,xj,N): ## val = stats.sum(xj)*1.0/N ## tmp = exp(-xj/th) ## term = sum(xj*tmp,axis=0) ## term /= sum(tmp,axis=0) ## return val - term ## s = optimize.fixed_point(fixedsolve, 1.0, args=(x,N),xtol=1e-5) ## xbar = -s*log(sum(exp(-x/s),axis=0)*1.0/N) xbar, s = distributions.gumbel_l.fit(x) w = (y-xbar)/s z = distributions.gumbel_l.cdf(w) sig = array([25,10,5,2.5,1]) critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)),3) i = arange(1,N+1) S = sum((2*i-1.0)/N*(log(z)+log(1-z[::-1])),axis=0) A2 = -N-S return A2, critical, sig def ansari(x,y): """ Perform the Ansari-Bradley test for equal scale parameters The Ansari-Bradley test is a non-parametric test for the equality of the scale parameter of the distributions from which two samples were drawn. Parameters ---------- x, y : array_like arrays of sample data Returns ------- AB : float The Ansari-Bradley test statistic p-value : float The p-value of the hypothesis test See Also -------- fligner : A non-parametric test for the equality of k variances mood : A non-parametric test for the equality of two scale parameters Notes ----- The p-value given is exact when the sample sizes are both less than 55 and there are no ties, otherwise a normal approximation for the p-value is used. References ---------- .. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2. 
""" x,y = asarray(x),asarray(y) n = len(x) m = len(y) if m < 1: raise ValueError("Not enough other observations.") if n < 1: raise ValueError("Not enough test observations.") N = m+n xy = r_[x,y] # combine rank = stats.rankdata(xy) symrank = amin(array((rank,N-rank+1)),0) AB = sum(symrank[:n],axis=0) uxy = unique(xy) repeats = (len(uxy) != len(xy)) exact = ((m<55) and (n<55) and not repeats) if repeats and ((m < 55) or (n < 55)): warnings.warn("Ties preclude use of exact statistic.") if exact: astart, a1, ifault = statlib.gscale(n,m) ind = AB-astart total = sum(a1,axis=0) if ind < len(a1)/2.0: cind = int(ceil(ind)) if (ind == cind): pval = 2.0*sum(a1[:cind+1],axis=0)/total else: pval = 2.0*sum(a1[:cind],axis=0)/total else: find = int(floor(ind)) if (ind == floor(ind)): pval = 2.0*sum(a1[find:],axis=0)/total else: pval = 2.0*sum(a1[find+1:],axis=0)/total return AB, min(1.0,pval) # otherwise compute normal approximation if N % 2: # N odd mnAB = n*(N+1.0)**2 / 4.0 / N varAB = n*m*(N+1.0)*(3+N**2)/(48.0*N**2) else: mnAB = n*(N+2.0)/4.0 varAB = m*n*(N+2)*(N-2.0)/48/(N-1.0) if repeats: # adjust variance estimates # compute sum(tj * rj**2,axis=0) fac = sum(symrank**2,axis=0) if N % 2: # N odd varAB = m*n*(16*N*fac-(N+1)**4)/(16.0 * N**2 * (N-1)) else: # N even varAB = m*n*(16*fac-N*(N+2)**2)/(16.0 * N * (N-1)) z = (AB - mnAB)/sqrt(varAB) pval = distributions.norm.sf(abs(z)) * 2.0 return AB, pval def bartlett(*args): """ Perform Bartlett's test for equal variances Bartlett's test tests the null hypothesis that all input samples are from populations with equal variances. For samples from significantly non-normal populations, Levene's test `levene`_ is more robust. Parameters ---------- sample1, sample2,... : array_like arrays of sample data. May be different lengths. Returns ------- T : float The test statistic. p-value : float The p-value of the test. References ---------- .. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm .. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical Methods, Eighth Edition, Iowa State University Press. """ k = len(args) if k < 2: raise ValueError("Must enter at least two input sample vectors.") Ni = zeros(k) ssq = zeros(k,'d') for j in range(k): Ni[j] = len(args[j]) ssq[j] = np.var(args[j], ddof=1) Ntot = sum(Ni,axis=0) spsq = sum((Ni-1)*ssq,axis=0)/(1.0*(Ntot-k)) numer = (Ntot*1.0-k)*log(spsq) - sum((Ni-1.0)*log(ssq),axis=0) denom = 1.0 + (1.0/(3*(k-1)))*((sum(1.0/(Ni-1.0),axis=0))-1.0/(Ntot-k)) T = numer / denom pval = distributions.chi2.sf(T,k-1) # 1 - cdf return T, pval def levene(*args,**kwds): """ Perform Levene test for equal variances. The Levene test tests the null hypothesis that all input samples are from populations with equal variances. Levene's test is an alternative to Bartlett's test `bartlett` in the case where there are significant deviations from normality. Parameters ---------- sample1, sample2, ... : array_like The sample data, possibly with different lengths center : {'mean', 'median', 'trimmed'}, optional Which function of the data to use in the test. The default is 'median'. proportiontocut : float, optional When `center` is 'trimmed', this gives the proportion of data points to cut from each end. (See `scipy.stats.trim_mean`.) Default is 0.05. Returns ------- W : float The test statistic. p-value : float The p-value for the test. Notes ----- Three variations of Levene's test are possible. 
The possibilities and their recommended usages are: * 'median' : Recommended for skewed (non-normal) distributions> * 'mean' : Recommended for symmetric, moderate-tailed distributions. * 'trimmed' : Recommended for heavy-tailed distributions. References ---------- .. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm .. [2] Levene, H. (1960). In Contributions to Probability and Statistics: Essays in Honor of Harold Hotelling, I. Olkin et al. eds., Stanford University Press, pp. 278-292. .. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American Statistical Association, 69, 364-367 """ # Handle keyword arguments. center = 'median' proportiontocut = 0.05 for kw, value in kwds.items(): if kw not in ['center', 'proportiontocut']: raise TypeError("levene() got an unexpected keyword argument '%s'" % kw) if kw == 'center': center = value else: proportiontocut = value k = len(args) if k < 2: raise ValueError("Must enter at least two input sample vectors.") Ni = zeros(k) Yci = zeros(k,'d') if not center in ['mean','median','trimmed']: raise ValueError("Keyword argument <center> must be 'mean', 'median'" + "or 'trimmed'.") if center == 'median': func = lambda x: np.median(x, axis=0) elif center == 'mean': func = lambda x: np.mean(x, axis=0) else: # center == 'trimmed' args = tuple(stats.trimboth(arg, proportiontocut) for arg in args) func = lambda x: np.mean(x, axis=0) for j in range(k): Ni[j] = len(args[j]) Yci[j] = func(args[j]) Ntot = sum(Ni,axis=0) # compute Zij's Zij = [None]*k for i in range(k): Zij[i] = abs(asarray(args[i])-Yci[i]) # compute Zbari Zbari = zeros(k,'d') Zbar = 0.0 for i in range(k): Zbari[i] = np.mean(Zij[i], axis=0) Zbar += Zbari[i]*Ni[i] Zbar /= Ntot numer = (Ntot-k)*sum(Ni*(Zbari-Zbar)**2,axis=0) # compute denom_variance dvar = 0.0 for i in range(k): dvar += sum((Zij[i]-Zbari[i])**2,axis=0) denom = (k-1.0)*dvar W = numer / denom pval = distributions.f.sf(W,k-1,Ntot-k) # 1 - cdf return W, pval @setastest(False) def binom_test(x,n=None,p=0.5): """ Perform a test that the probability of success is p. This is an exact, two-sided test of the null hypothesis that the probability of success in a Bernoulli experiment is `p`. Parameters ---------- x : integer or array_like the number of successes, or if x has length 2, it is the number of successes and the number of failures. n : integer the number of trials. This is ignored if x gives both the number of successes and failures p : float, optional The hypothesized probability of success. 0 <= p <= 1. The default value is p = 0.5 Returns ------- p-value : float The p-value of the hypothesis test References ---------- .. 
[1] http://en.wikipedia.org/wiki/Binomial_test """ x = atleast_1d(x).astype(np.integer) if len(x) == 2: n = x[1]+x[0] x = x[0] elif len(x) == 1: x = x[0] if n is None or n < x: raise ValueError("n must be >= x") n = np.int_(n) else: raise ValueError("Incorrect length for x.") if (p > 1.0) or (p < 0.0): raise ValueError("p must be in range [0,1]") d = distributions.binom.pmf(x,n,p) rerr = 1+1e-7 if (x < p*n): i = np.arange(np.ceil(p*n),n+1) y = np.sum(distributions.binom.pmf(i,n,p) <= d*rerr,axis=0) pval = distributions.binom.cdf(x,n,p) + distributions.binom.sf(n-y,n,p) else: i = np.arange(np.floor(p*n)) y = np.sum(distributions.binom.pmf(i,n,p) <= d*rerr,axis=0) pval = distributions.binom.cdf(y-1,n,p) + distributions.binom.sf(x-1,n,p) return min(1.0,pval) def _apply_func(x,g,func): # g is list of indices into x # separating x into different groups # func should be applied over the groups g = unique(r_[0,g,len(x)]) output = [] for k in range(len(g)-1): output.append(func(x[g[k]:g[k+1]])) return asarray(output) def fligner(*args,**kwds):<|fim▁hole|> Fligner's test tests the null hypothesis that all input samples are from populations with equal variances. Fligner's test is non-parametric in contrast to Bartlett's test `bartlett` and Levene's test `levene`. Parameters ---------- sample1, sample2, ... : array_like arrays of sample data. Need not be the same length center : {'mean', 'median', 'trimmed'}, optional keyword argument controlling which function of the data is used in computing the test statistic. The default is 'median'. proportiontocut : float, optional When `center` is 'trimmed', this gives the proportion of data points to cut from each end. (See `scipy.stats.trim_mean`.) Default is 0.05. Returns ------- Xsq : float the test statistic p-value : float the p-value for the hypothesis test Notes ----- As with Levene's test there are three variants of Fligner's test that differ by the measure of central tendency used in the test. See `levene` for more information. References ---------- .. [1] http://www.stat.psu.edu/~bgl/center/tr/TR993.ps .. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample tests for scale. 'Journal of the American Statistical Association.' 71(353), 210-213. """ # Handle keyword arguments. 
center = 'median' proportiontocut = 0.05 for kw, value in kwds.items(): if kw not in ['center', 'proportiontocut']: raise TypeError("fligner() got an unexpected keyword argument '%s'" % kw) if kw == 'center': center = value else: proportiontocut = value k = len(args) if k < 2: raise ValueError("Must enter at least two input sample vectors.") if not center in ['mean','median','trimmed']: raise ValueError("Keyword argument <center> must be 'mean', 'median'" + "or 'trimmed'.") if center == 'median': func = lambda x: np.median(x, axis=0) elif center == 'mean': func = lambda x: np.mean(x, axis=0) else: # center == 'trimmed' args = tuple(stats.trimboth(arg, proportiontocut) for arg in args) func = lambda x: np.mean(x, axis=0) Ni = asarray([len(args[j]) for j in range(k)]) Yci = asarray([func(args[j]) for j in range(k)]) Ntot = sum(Ni,axis=0) # compute Zij's Zij = [abs(asarray(args[i])-Yci[i]) for i in range(k)] allZij = [] g = [0] for i in range(k): allZij.extend(list(Zij[i])) g.append(len(allZij)) ranks = stats.rankdata(allZij) a = distributions.norm.ppf(ranks/(2*(Ntot+1.0)) + 0.5) # compute Aibar Aibar = _apply_func(a,g,sum) / Ni anbar = np.mean(a, axis=0) varsq = np.var(a,axis=0, ddof=1) Xsq = sum(Ni*(asarray(Aibar)-anbar)**2.0,axis=0)/varsq pval = distributions.chi2.sf(Xsq,k-1) # 1 - cdf return Xsq, pval def mood(x,y): """ Perform Mood's test for equal scale parameters. Mood's two-sample test for scale parameters is a non-parametric test for the null hypothesis that two samples are drawn from the same distribution with the same scale parameter. Parameters ---------- x, y : array_like Arrays of sample data. Returns ------- z : float The z-score for the hypothesis test. p-value : float The p-value for the hypothesis test. See Also -------- fligner : A non-parametric test for the equality of k variances ansari : A non-parametric test for the equality of 2 variances bartlett : A parametric test for equality of k variances in normal samples levene : A parametric test for equality of k variances Notes ----- The data are assumed to be drawn from probability distributions f(x) and f(x/s)/s respectively, for some probability density function f. The null hypothesis is that s = 1. """ n = len(x) m = len(y) xy = r_[x,y] N = m+n if N < 3: raise ValueError("Not enough observations.") ranks = stats.rankdata(xy) Ri = ranks[:n] M = sum((Ri - (N+1.0)/2)**2,axis=0) # Approx stat. mnM = n*(N*N-1.0)/12 varM = m*n*(N+1.0)*(N+2)*(N-2)/180 z = (M-mnM)/sqrt(varM) # Numerically better than p = norm.cdf(x); p = min(p, 1 - p) if z > 0: pval = distributions.norm.sf(z) else: pval = distributions.norm.cdf(z) # Account for two-sidedness pval *= 2. return z, pval def oneway(*args,**kwds): """Test for equal means in two or more samples from the normal distribution. If the keyword parameter <equal_var> is true then the variances are assumed to be equal, otherwise they are not assumed to be equal (default). Return test statistic and the p-value giving the probability of error if the null hypothesis (equal means) is rejected at this value. 
""" k = len(args) if k < 2: raise ValueError("Must enter at least two input sample vectors.") if 'equal_var' in kwds: if kwds['equal_var']: evar = 1 else: evar = 0 else: evar = 0 Ni = array([len(args[i]) for i in range(k)]) Mi = array([np.mean(args[i], axis=0) for i in range(k)]) Vi = array([np.var(args[i]) for i in range(k)]) Wi = Ni / Vi swi = sum(Wi,axis=0) N = sum(Ni,axis=0) my = sum(Mi*Ni,axis=0)*1.0/N tmp = sum((1-Wi/swi)**2 / (Ni-1.0),axis=0)/(k*k-1.0) if evar: F = ((sum(Ni*(Mi-my)**2,axis=0) / (k-1.0)) / (sum((Ni-1.0)*Vi,axis=0) / (N-k))) pval = distributions.f.sf(F,k-1,N-k) # 1-cdf else: m = sum(Wi*Mi,axis=0)*1.0/swi F = sum(Wi*(Mi-m)**2,axis=0) / ((k-1.0)*(1+2*(k-2)*tmp)) pval = distributions.f.sf(F,k-1.0,1.0/(3*tmp)) return F, pval def wilcoxon(x, y=None, zero_method="wilcox"): """ Calculate the Wilcoxon signed-rank test. The Wilcoxon signed-rank test tests the null hypothesis that two related paired samples come from the same distribution. In particular, it tests whether the distribution of the differences x - y is symmetric about zero. It is a non-parametric version of the paired T-test. Parameters ---------- x : array_like The first set of measurements. y : array_like, optional The second set of measurements. If `y` is not given, then the `x` array is considered to be the differences between the two sets of measurements. zero_method : string, {"pratt", "wilcox", "zsplit"}, optional "pratt": Pratt treatment: includes zero-differences in the ranking process (more conservative) "wilcox": Wilcox treatment: discards all zero-differences "zsplit": Zero rank split: just like Pratt, but spliting the zero rank between positive and negative ones Returns ------- T : float The sum of the ranks of the differences above or below zero, whichever is smaller. p-value : float The two-sided p-value for the test. Notes ----- Because the normal approximation is used for the calculations, the samples used should be large. A typical rule is to require that n > 20. References ---------- .. [1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test """ if not zero_method in ["wilcox", "pratt", "zsplit"]: raise ValueError("Zero method should be either 'wilcox' \ or 'pratt' or 'zsplit'") if y is None: d = x else: x, y = map(asarray, (x, y)) if len(x) != len(y): raise ValueError('Unequal N in wilcoxon. Aborting.') d = x-y if zero_method == "wilcox": d = compress(not_equal(d, 0), d, axis=-1) # Keep all non-zero differences count = len(d) if (count < 10): warnings.warn("Warning: sample size too small for normal approximation.") r = stats.rankdata(abs(d)) r_plus = sum((d > 0) * r, axis=0) r_minus = sum((d < 0) * r, axis=0) if zero_method == "zsplit": r_zero = sum((d == 0) * r, axis=0) r_plus += r_zero / 2. r_minus += r_zero / 2. T = min(r_plus, r_minus) mn = count*(count + 1.) * 0.25 se = count*(count + 1.) * (2. * count + 1.) if zero_method == "pratt": r = r[d != 0] if (len(r) != len(unique(r))): # handle ties in data replist, repnum = find_repeats(r) corr = 0.0 for i in range(len(replist)): si = repnum[i] corr += 0.5 * si * (si * si - 1.0) se -= corr se = sqrt(se / 24) z = (T - mn) / se prob = 2. 
* distributions.norm.sf(abs(z)) return T, prob def _hermnorm(N): # return the negatively normalized hermite polynomials up to order N-1 # (inclusive) # using the recursive relationship # p_n+1 = p_n(x)' - x*p_n(x) # and p_0(x) = 1 plist = [None]*N plist[0] = poly1d(1) for n in range(1,N): plist[n] = plist[n-1].deriv() - poly1d([1,0])*plist[n-1] return plist def pdf_fromgamma(g1,g2,g3=0.0,g4=None): if g4 is None: g4 = 3*g2*g2 sigsq = 1.0/g2 sig = sqrt(sigsq) mu = g1*sig**3.0 p12 = _hermnorm(13) for k in range(13): p12[k] = p12[k]/sig**k # Add all of the terms to polynomial totp = p12[0] - (g1/6.0*p12[3]) + \ (g2/24.0*p12[4] +g1*g1/72.0*p12[6]) - \ (g3/120.0*p12[5] + g1*g2/144.0*p12[7] + g1**3.0/1296.0*p12[9]) + \ (g4/720*p12[6] + (g2*g2/1152.0+g1*g3/720)*p12[8] + g1*g1*g2/1728.0*p12[10] + g1**4.0/31104.0*p12[12]) # Final normalization totp = totp / sqrt(2*pi)/sig def thefunc(x): xn = (x-mu)/sig return totp(xn)*exp(-xn*xn/2.0) return thefunc def circmean(samples, high=2*pi, low=0, axis=None): """ Compute the circular mean for samples in a range. Parameters ---------- samples : array_like Input array. high : float or int, optional High boundary for circular mean range. Default is ``2*pi``. low : float or int, optional Low boundary for circular mean range. Default is 0. axis : int, optional Axis along which means are computed. The default is to compute the mean of the flattened array. Returns ------- circmean : float Circular mean. """ ang = (samples - low)*2*pi / (high-low) res = angle(np.mean(exp(1j*ang), axis=axis)) mask = res < 0 if (mask.ndim > 0): res[mask] += 2*pi elif mask: res = res + 2*pi return res*(high-low)/2.0/pi + low def circvar(samples, high=2*pi, low=0, axis=None): """ Compute the circular variance for samples assumed to be in a range Parameters ---------- samples : array_like Input array. low : float or int, optional Low boundary for circular variance range. Default is 0. high : float or int, optional High boundary for circular variance range. Default is ``2*pi``. axis : int, optional Axis along which variances are computed. The default is to compute the variance of the flattened array. Returns ------- circvar : float Circular variance. Notes ----- This uses a definition of circular variance that in the limit of small angles returns a number close to the 'linear' variance. """ ang = (samples - low)*2*pi / (high-low) res = np.mean(exp(1j*ang), axis=axis) R = abs(res) return ((high-low)/2.0/pi)**2 * 2 * log(1/R) def circstd(samples, high=2*pi, low=0, axis=None): """ Compute the circular standard deviation for samples assumed to be in the range [low to high]. Parameters ---------- samples : array_like Input array. low : float or int, optional Low boundary for circular standard deviation range. Default is 0. high : float or int, optional High boundary for circular standard deviation range. Default is ``2*pi``. axis : int, optional Axis along which standard deviations are computed. The default is to compute the standard deviation of the flattened array. Returns ------- circstd : float Circular standard deviation. Notes ----- This uses a definition of circular standard deviation that in the limit of small angles returns a number close to the 'linear' standard deviation. """ ang = (samples - low)*2*pi / (high-low) res = np.mean(exp(1j*ang), axis=axis) R = abs(res) return ((high-low)/2.0/pi) * sqrt(-2*log(R)) #Tests to include (from R) -- some of these already in stats. 
######## #X Ansari-Bradley #X Bartlett (and Levene) #X Binomial #Y Pearson's Chi-squared (stats.chisquare) #Y Association Between Paired samples (stats.pearsonr, stats.spearmanr) # stats.kendalltau) -- these need work though # Fisher's exact test #X Fligner-Killeen Test #Y Friedman Rank Sum (stats.friedmanchisquare?) #Y Kruskal-Wallis #Y Kolmogorov-Smirnov # Cochran-Mantel-Haenszel Chi-Squared for Count # McNemar's Chi-squared for Count #X Mood Two-Sample #X Test For Equal Means in One-Way Layout (see stats.ttest also) # Pairwise Comparisons of proportions # Pairwise t tests # Tabulate p values for pairwise comparisons # Pairwise Wilcoxon rank sum tests # Power calculations two sample test of prop. # Power calculations for one and two sample t tests # Equal or Given Proportions # Trend in Proportions # Quade Test #Y Student's T Test #Y F Test to compare two variances #XY Wilcoxon Rank Sum and Signed Rank Tests<|fim▁end|>
""" Perform Fligner's test for equal variances.
<|file_name|>ssw_wrap.py<|end_file_name|><|fim▁begin|>""" @package ssw_wrap @brief Simple python wrapper for SSW align library To use the dynamic library libssw.so you may need to modify the LD_LIBRARY_PATH environment variable to include the library directory (export LD_LIBRARY_PATH=$PWD) or for definitive inclusion of the lib edit /etc/ld.so.conf and add the path or the directory containing the library and update the cache by using /sbin/ldconfig as root @copyright [The MIT licence](http://opensource.org/licenses/MIT) @author Clement & Adrien Leger - 2014 """ #~~~~~~~GLOBAL IMPORTS~~~~~~~# # Standard library packages from ctypes import * import os def _get_libssw_path(): base = os.path.dirname(__file__) matches = [x for x in os.listdir(base) if (x.startswith("libssw") & x.endswith(".so"))] if len(matches) < 1: raise Exception("Couldn't find libssw.so in this directory: '{}'".format(base)) return os.path.join(base, matches[0]) libssw = cdll.LoadLibrary(_get_libssw_path())#os.path.join(os.path.dirname(__file__), 'libssw.so')) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# class CAlignRes(Structure): """ @class SSWAlignRes @brief ctypes Structure with s_align struct mapping returned by SSWAligner.Align func Correspond to the structure of the query profile """ #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# #~~~~~~~Ctype Structure~~~~~~~# _fields_ = [('score', c_uint16), ('score2', c_uint16), ('ref_begin', c_int32), ('ref_end', c_int32), ('query_begin', c_int32), ('query_end', c_int32), ('ref_end2', c_int32), ('cigar', POINTER(c_uint32)), ('cigarLen', c_int32)] #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# class Aligner(object): """ @class SSWAligner @brief Wrapper for SSW align library """ #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# #~~~~~~~CLASS VARIABLES~~~~~~~# # Dictionnary to map Nucleotide to int as expected by the SSW C library base_to_int = { 'A':0, 'C':1, 'G':2, 'T':3, 'N':4, 'a':0, 'c':1, 'g':2, 't':3, 'n':4} int_to_base = { 0:'A', 1:'C', 2:'G', 3:'T', 4:'N'} # Load the ssw library using ctypes # libssw = cdll.LoadLibrary('libssw.so') #libssw = cdll.LoadLibrary(_get_libssw_path())#os.path.join(os.path.dirname(__file__), 'libssw.so')) # Init and setup the functions pointer to map the one specified in the SSW lib # ssw_init method ssw_init = libssw.ssw_init ssw_init.restype = c_void_p ssw_init.argtypes = [POINTER(c_int8), c_int32, POINTER(c_int8), c_int32, c_int8] # init_destroy function init_destroy = libssw.init_destroy init_destroy.restype = None init_destroy.argtypes = [c_void_p] # ssw_align function ssw_align = libssw.ssw_align ssw_align.restype = POINTER(CAlignRes) ssw_align.argtypes = [c_void_p, POINTER(c_int8), c_int32, c_uint8, c_uint8, c_uint8, c_uint16, c_int32, c_int32] # align_destroy function align_destroy = libssw.align_destroy align_destroy.restype = None align_destroy.argtypes = [POINTER(CAlignRes)] #~~~~~~~FONDAMENTAL METHODS~~~~~~~# def __repr__(self): msg = self.__str__() msg += "SCORE PARAMETERS:\n" msg += " Gap Weight Open: {} Extension: {}\n".format(-self.gap_open, -self.gap_extend) msg += " Align Weight Match: {} Mismatch: {}\n\n".format(self.match, -self.mismatch) msg += " Match/mismatch Score matrix\n" msg += " \tA\tC\tG\tT\tN\n" msg += " A\t{}\t{}\t{}\t{}\t{}\n".format(self.match, -self.mismatch, -self.mismatch, -self.mismatch, 0) msg += " 
C\t{}\t{}\t{}\t{}\t{}\n".format(-self.mismatch, self.match, -self.mismatch, -self.mismatch, 0) msg += " G\t{}\t{}\t{}\t{}\t{}\n".format(-self.mismatch, -self.mismatch, self.match, -self.mismatch, 0) msg += " T\t{}\t{}\t{}\t{}\t{}\n".format(-self.mismatch, -self.mismatch, -self.mismatch, self.match, 0) msg += " N\t{}\t{}\t{}\t{}\t{}\n\n".format(0,0,0,0,0) msg += "RESULT PARAMETERS:\n" msg += " Report cigar {}\n".format(self.report_cigar) msg += " Report secondary match {}\n\n".format(self.report_secondary) msg += "REFERENCE SEQUENCE :\n" if self.ref_len <= 50: msg += "".join([self.int_to_base[i] for i in self.ref_seq])+"\n" else: msg += "".join([self.int_to_base[self.ref_seq[i]] for i in range(50)])+"...\n" msg += " Lenght :{} nucleotides\n".format(self.ref_len) return msg def __str__(self): return "\n<Instance of {} from {} >\n".format(self.__class__.__name__, self.__module__) def __init__(self, ref_seq="", match=2, mismatch=2, gap_open=3, gap_extend=1, report_secondary=False, report_cigar=False): """ Initialize object by creating an interface with ssw library fonctions A reference sequence is also assigned to the object for multiple alignment against queries with the align function @param ref_seq Reference sequence as a python string (case insensitive) @param match Weight for a match @param mismatch Absolute value of mismatch penalty @param gap_open Absolute value of gap open penalty @param gap_extend Absolute value of gap extend penalty @param report_secondary Report the 2nd best alignement if true @param report_cigar Report cigar string if true """ # Store overall alignment parameters self.report_secondary = report_secondary self.report_cigar = report_cigar # Set gap penalties self.set_gap(gap_open, gap_extend) # Set the cost matrix self.set_mat(match, mismatch) # Set the reference sequence self.set_ref(ref_seq) #~~~~~~~SETTERS METHODS~~~~~~~# def set_gap(self, gap_open=3, gap_extend=1): """ Store gapopen and gap extension penalties """ self.gap_open = gap_open self.gap_extend = gap_extend def set_mat(self, match=2, mismatch=2): """ Store match and mismatch scores then initialize a Cost matrix and fill it with match and mismatch values. Ambiguous base: no penalty """ self.match = match self.mismatch = mismatch mat_decl = c_int8 * 25 self.mat = mat_decl(match, -mismatch, -mismatch, -mismatch, 0, -mismatch, match, -mismatch, -mismatch, 0, -mismatch, -mismatch, match, -mismatch, 0, -mismatch, -mismatch, -mismatch, match, 0, 0, 0, 0, 0, 0) def set_ref(self, ref_seq): """ Determine the size of the ref sequence and cast it in a c type integer matrix """ if ref_seq: self.ref_len = len(ref_seq) self.ref_seq = self._DNA_to_int_mat (ref_seq, self.ref_len) else: self.ref_len = 0 self.ref_seq = "" #~~~~~~~PUBLIC METHODS~~~~~~~# def align(self, query_seq, min_score=0, min_len=0): """ Perform the alignment of query against the object reference sequence @param query_seq Query sequence as a python string (case insensitive) @param min_score Minimal score of match. None will be return in case of filtering out @param min_len Minimal length of match. None will be return in case of filtering out @return A SSWAlignRes Object containing informations about the alignment. 
""" # Determine the size of the ref sequence and cast it in a c type integer matrix query_len = len(query_seq) query_seq = self._DNA_to_int_mat (query_seq, query_len) # Create the query profile using the query sequence profile = self.ssw_init(query_seq, # Query seq in c type integers c_int32(query_len), # Length of Queryseq in bites self.mat, # Score matrix 5, # Square root of the number of elements in mat 2) # flag = no estimation of the best alignment score # Setup the mask_len parameters = distance between the optimal and suboptimal alignment # if < 15, the function will NOT return the suboptimal alignment information if query_len > 30: mask_len = query_len//2 else: mask_len = 15 c_result = self.ssw_align (profile, # Query profile self.ref_seq, # Ref seq in c type integers c_int32(self.ref_len), # Length of Refseq in bites self.gap_open, # Absolute value of gap open penalty self.gap_extend, # absolute value of gap extend penalty 1, # Bitwise FLAG for output values = return all 0, # Score filter = return all 0, # Distance filter = return all mask_len) # Distance between the optimal and suboptimal alignment # Transform the Cstructure into a python object if score and lenght match the requirements score = c_result.contents.score match_len = c_result.contents.query_end - c_result.contents.query_begin + 1 if score >= min_score and match_len >= min_len: py_result = PyAlignRes(c_result, query_len, self.report_secondary, self.report_cigar) else: py_result = None # Free reserved space by ssw.init and ssw_init methods. self._init_destroy(profile) self._align_destroy(c_result) # Return the object return py_result #~~~~~~~PRIVATE METHODS~~~~~~~# def _DNA_to_int_mat (self, seq, len_seq): """ Cast a python DNA string into a Ctype int8 matrix """ # Declare the matrix query_num_decl = c_int8 * len_seq query_num = query_num_decl() # for each letters in ATCGN transform in integers thanks to self.base_to_int for i in range(len_seq): try: value = self.base_to_int[seq[i]] # if the base is not in the canonic DNA bases assign 4 as for N except KeyError: value = 4 finally: query_num[i] = value return query_num def _init_destroy(self, profile): """ Free the space alocated for the matrix used by init """ self.init_destroy(profile) def _align_destroy(self, align): """ Free the space alocated for the matrix used by align """ self.align_destroy(align) # Load the ssw library using ctypes #glibssw = cdll.LoadLibrary(os.path.join(os.path.dirname(__file__), 'libssw.so')) # libssw = cdll.LoadLibrary('libssw.so') # Init and setup the functions pointer to map the one specified in the SSW lib # cigar_int_to_len function cigar_int_to_len = libssw.cigar_int_to_len cigar_int_to_len.restype = c_int32 cigar_int_to_len.argtypes = [c_int32] # cigar_int_to_op function cigar_int_to_op = libssw.cigar_int_to_op cigar_int_to_op.restype = c_char cigar_int_to_op.argtypes = [c_int32] #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# class PyAlignRes(object): """ @class PyAlignRes @brief Extract and verify result from a CAlignRes structure. 
A comprehensive python object is created according to user requirements (+- cigar string and secondary alignment) """ def __repr__(self): msg = self.__str__() msg += "OPTIMAL MATCH\n" msg += "Score {}\n".format(self.score) msg += "Reference begin {}\n".format(self.ref_begin) msg += "Reference end {}\n".format(self.ref_end) msg += "Query begin {}\n".format(self.query_begin) msg += "Query end {}\n".format(self.query_end) if self.cigar_string: msg += "Cigar_string {}\n".format(self.cigar_string) if self.score2: msg += "SUB-OPTIMAL MATCH\n" msg += "Score 2 {}\n".format(self.score2) msg += "Ref_end2 {}\n".format(self.ref_end2) return msg def __str__(self): return "\n<Instance of {} from {} >\n".format(self.__class__.__name__, self.__module__) def __init__ (self, Res, query_len, report_secondary=False, report_cigar=False):<|fim▁hole|> @param report_secondary Report the 2nd best alignement if true @param report_cigar Report cigar string if true """ # Parse value in the C type structure pointer # Minimal mandatory parameters self.score = Res.contents.score self.ref_begin = Res.contents.ref_begin self.ref_end = Res.contents.ref_end self.query_begin = Res.contents.query_begin self.query_end = Res.contents.query_end # Information for sub-optimal match if require and available score2 = Res.contents.score2 if report_secondary and score2 != 0: self.score2 = score2 self.ref_end2 = Res.contents.ref_end2 else: self.score2 = None self.ref_end2 = None # Cigar Information if CIGAR string if require and available cigar_len = Res.contents.cigarLen if report_cigar and cigar_len > 0: self.cigar_string = self._cigar_string (Res.contents.cigar, cigar_len, query_len) else: self.cigar_string = None def _cigar_string(self, cigar, cigar_len, query_len): """ Convert cigar and cigarLen into an human readable Cigar string as in SAM files """ # Empty string for iterative writing of the cigar string cigar_string = [] # If the query match do not start at its first base # = introduce a softclip at the begining if self.query_begin > 0: op_len = self.query_begin op_char = "S" cigar_string.append('{}{}'.format(op_len, op_char)) # Iterate over the cigar (pointer to a vector of int) for i in range(cigar_len): op_len = cigar_int_to_len(cigar[i]) op_char = cigar_int_to_op(cigar[i]).decode("utf-8") cigar_string.append('{}{}'.format(op_len, op_char)) # If the length of bases aligned is shorter than the overall query length # = introduce a softclip at the end end_len = query_len - self.query_end - 1 if end_len != 0: op_len = end_len op_char = "S" cigar_string.append('{}{}'.format(op_len, op_char)) return "".join(cigar_string)<|fim▁end|>
""" Parse CAlignRes structure and copy its values in object variables @param Res A CAlignRes structure @param query_len length of the query sequence
<|file_name|>seed.js<|end_file_name|><|fim▁begin|>/* * Populate DB with sample data on server start * to disable, edit config/environment/index.js, and set `seedDB: false` */ 'use strict'; var mongoose = require('mongoose'); var ObjectId = mongoose.Schema.Types.ObjectId; var env = process.env.NODE_ENV || 'development'; var User = require('../api/user/user.model').model; var Customer = require('../api/customer/customer.model').model; var Lot = require('../api/lot/lot.model').model; var Shipment = require('../api/shipment/shipment.model').model; var Product = require('../api/product/product.model').model; /* // Insert some data needed to bootstrap or init the application if ('production' === env) { // Insert some data needed to init the production instance only, update a version info ... } */ /* * Insert dummy data to test the application */ exports.users = [{ provider: 'local', name: 'Test User', password: 'password', active: true }, { provider: 'local', role: 'admin', name: 'Admin', password: 'password', active: true }, { provider: 'local', role: 'root', name: 'Root', password: 'password', active: true }]; exports.shipments = [ { // _id: '57b4bcf47a18a5b400923d78', orderId:"DKcRNc7XN", creationDate:"Aug 6, 2016", shipByDate:"Aug 12, 2016", status:"Shipment confirmed", units:1, payments:"$3.92", sku: "06F", // product: ObjectId('57b4bcf37a18a5b400923d75'), // customer: ObjectId('57b4bcf37a18a5b400923d73') }, { // _id: '57b4bcf47a18a5b400923d79', orderId:"DplMb07cN", creationDate:"Aug 6, 2016", shipByDate:"Aug 12, 2016", status:"Unshipped", units:1, payments:"$6.78", sku: '12P', // product: ObjectId('57b4bcf37a18a5b400923d76'), // customer: ObjectId('57b4bcf37a18a5b400923d74') },{ // _id: '57b4bcf47a18a5b400923d7a', orderId:"DpTMbp7bN", creationDate:"Aug 6, 2016", shipByDate:"Aug 12, 2016", status:"Shipment confirmed", units:1, payments:"$3.92", sku: "06F", // product: '57b4bcf37a18a5b400923d77', // customer: '57b4bcf37a18a5b400923d74' }]; exports.customers = [{ // _id: '57b4bcf37a18a5b400923d73', name: 'Amazon Locker - Hannah', address: { street: '12355 15th Ave NE at 7-Eleven', city: 'Seattle', state: 'WA', zip: '98125-4819' }, phone: "2064985471" },{ // _id: '57b4bcf37a18a5b400923d74', name: 'Amazon Locker - George', address: { street: '12355 15th Ave NE at 7-Eleven', city: 'Seattle', state: 'WA', zip: '98125-4819' }, phone: "2064985468" }]; exports.lots = [{ created: "Aug 11, 2016", shipped: "Aug 12, 2016", shipments: [] }]; exports.products = [{ // _id: '57b4bcf37a18a5b400923d77', name: 'Finess Softpatch for Stress Incontinence', upc: '860507000213', asin: 'B01438A52M' }, { // _id: '57b4bcf37a18a5b400923d76', name: 'Finess Softpatch for Stress Incontinence', upc: '860507000206', asin: 'B013TT27TA' }, { // _id: '57b4bcf37a18a5b400923d75', name: 'Finess Softpatch for Stress Incontinence', upc: '860507000220', asin: 'B01DEDVJLI' }]; // Join all address fields of customers exports.customers = exports.customers.map(function(customer) { var addr = customer.address, lines = []; lines.push(addr.street); lines.push(addr.city + ', ' + addr.state + ' ' + addr.zip); customer.address = lines.join('\n'); return customer; }) if ('development' === env || 'test' === env) { console.log('Populating test and development data ...'); User.find({}).remove(function () { User.create(exports.users, function (err) { if (err) { console.error('Error while populating users: %s', err); } else { console.log('finished populating users'); } }); }); Customer.find({}).remove(function () { 
Customer.create(exports.customers, function (err) { if (err) { console.error('Error while populating customers: %s', err); } else { console.log('finished populating customers'); popProducts(); }<|fim▁hole|> function popProducts() { Product.find({}).remove(function () { Product.create(exports.products, function (err) { if (err) { console.error('Error while populating products: %s', err); } else { console.log('finished populating products'); popShipments(); } }); }); } function popShipments() { Shipment.find({}).remove(function () { Customer.find({}).exec().then(function(customers) { exports.shipments.forEach(function (shipment, i) { shipment.customer = customers[i%2]['_id']; }) Product.find({}).exec().then(function(products) { exports.shipments.forEach(function (shipment, i) { shipment.product = products[i%3]['_id']; }) Shipment.create(exports.shipments, function (err) { if (err) { console.error('Error while populating shipments: %s', err); } else { console.log('finished populating shipments'); popLots(); } }); }); }) }); } function popLots() { Lot.find({}).remove(function () { Shipment.find({}).exec().then(function(shipments) { // console.log('in popLots, shipments: ' + JSON.stringify(shipments)) exports.lots.forEach(function (lot, i) { lot.shipments.push(shipments[0]['_id']); lot.shipments.push(shipments[2]['_id']); // console.log('created lot shipments: ' + JSON.stringify(lot.shipments)) }) Lot.create(exports.lots, function (err) { if (err) { console.error('Error while populating lots: %s', err); } else { console.log('finished populating lots'); } }); }) }); } }<|fim▁end|>
}); });
<|file_name|>at_sep.js<|end_file_name|><|fim▁begin|>AT.prototype.atSepHelpers = { sepText: function(){<|fim▁hole|> }, };<|fim▁end|>
return T9n.get(AccountsTemplates.texts.sep, markIfMissing=false);
<|file_name|>placeholder.go<|end_file_name|><|fim▁begin|>package soymsg import ( "bytes" "regexp" "strconv" "strings" "github.com/robfig/soy/ast" ) // setPlaceholderNames generates the placeholder names for all children of the // given message node, setting the .Name property on them. func setPlaceholderNames(n *ast.MsgNode) { // Step 1: Determine representative nodes and build preliminary map var ( baseNameToRepNodes = make(map[string][]ast.Node) equivNodeToRepNodes = make(map[ast.Node]ast.Node) ) var nodeQueue []ast.Node = phNodes(n.Body) for len(nodeQueue) > 0 { var node = nodeQueue[0] nodeQueue = nodeQueue[1:] var baseName string switch node := node.(type) { case *ast.MsgPlaceholderNode: baseName = genBasePlaceholderName(node.Body, "XXX") case *ast.MsgPluralNode: nodeQueue = append(nodeQueue, pluralCaseBodies(node)...) baseName = genBasePlaceholderName(node.Value, "NUM") default: panic("unexpected") } if nodes, ok := baseNameToRepNodes[baseName]; !ok { baseNameToRepNodes[baseName] = []ast.Node{node} } else { var isNew = true var str = node.String() for _, other := range nodes { if other.String() == str { equivNodeToRepNodes[node] = other isNew = false break } } if isNew { baseNameToRepNodes[baseName] = append(nodes, node) } } } // Step 2: Build final maps of name to representative node var nameToRepNodes = make(map[string]ast.Node) for baseName, nodes := range baseNameToRepNodes { if len(nodes) == 1 { nameToRepNodes[baseName] = nodes[0] continue } var nextSuffix = 1 for _, node := range nodes { for { var newName = baseName + "_" + strconv.Itoa(nextSuffix) if _, ok := nameToRepNodes[newName]; !ok { nameToRepNodes[newName] = node break } nextSuffix++ } } } // Step 3: Create maps of every node to its name var nodeToName = make(map[ast.Node]string) for name, node := range nameToRepNodes { nodeToName[node] = name } for other, repNode := range equivNodeToRepNodes { nodeToName[other] = nodeToName[repNode] } // Step 4: Set the calculated names on all the nodes. for node, name := range nodeToName { switch node := node.(type) { case *ast.MsgPlaceholderNode: node.Name = name case *ast.MsgPluralNode: node.VarName = name default: panic("unexpected: " + node.String()) } } } func phNodes(n ast.ParentNode) []ast.Node { var nodeQueue []ast.Node for _, child := range n.Children() { switch child := child.(type) { case *ast.MsgPlaceholderNode, *ast.MsgPluralNode: nodeQueue = append(nodeQueue, child) } } return nodeQueue } func pluralCaseBodies(node *ast.MsgPluralNode) []ast.Node { var r []ast.Node for _, plCase := range node.Cases { r = append(r, phNodes(plCase.Body)...) } return append(r, phNodes(node.Default)...) 
} func genBasePlaceholderName(node ast.Node, defaultName string) string { // TODO: user supplied placeholder (phname) switch part := node.(type) { case *ast.PrintNode: return genBasePlaceholderNameFromExpr(part.Arg, defaultName) case *ast.MsgHtmlTagNode: return genBasePlaceholderNameFromHtml(part) case *ast.DataRefNode: return genBasePlaceholderNameFromExpr(node, defaultName) } return defaultName } func genBasePlaceholderNameFromExpr(expr ast.Node, defaultName string) string { switch expr := expr.(type) { case *ast.GlobalNode: return toUpperUnderscore(expr.Name) case *ast.DataRefNode: if len(expr.Access) == 0 { return toUpperUnderscore(expr.Key) } var lastChild = expr.Access[len(expr.Access)-1] if lastChild, ok := lastChild.(*ast.DataRefKeyNode); ok { return toUpperUnderscore(lastChild.Key) } } return defaultName } var htmlTagNames = map[string]string{ "a": "link", "br": "break", "b": "bold", "i": "italic", "li": "item", "ol": "ordered_list", "ul": "unordered_list", "p": "paragraph", "img": "image", "em": "emphasis", } func genBasePlaceholderNameFromHtml(node *ast.MsgHtmlTagNode) string { var tag, tagType = tagName(node.Text) if prettyName, ok := htmlTagNames[tag]; ok { tag = prettyName } return toUpperUnderscore(tagType + tag) } func tagName(text []byte) (name, tagType string) { switch { case bytes.HasPrefix(text, []byte("</")):<|fim▁hole|> default: tagType = "START_" } text = bytes.TrimPrefix(text, []byte("<")) text = bytes.TrimPrefix(text, []byte("/")) for i, ch := range text { if !isAlphaNumeric(ch) { return strings.ToLower(string(text[:i])), tagType } } // the parser should never produce html tag nodes that tagName can't handle. panic("no tag name found: " + string(text)) } func isAlphaNumeric(r byte) bool { return 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' || '0' <= r && r <= '9' } var ( leadingOrTrailing_ = regexp.MustCompile("^_+|_+$") consecutive_ = regexp.MustCompile("__+") wordBoundary1 = regexp.MustCompile("([a-zA-Z])([A-Z][a-z])") // <letter>_<upper><lower> wordBoundary2 = regexp.MustCompile("([a-zA-Z])([0-9])") // <letter>_<digit> wordBoundary3 = regexp.MustCompile("([0-9])([a-zA-Z])") // <digit>_<letter> ) func toUpperUnderscore(ident string) string { ident = leadingOrTrailing_.ReplaceAllString(ident, "") ident = consecutive_.ReplaceAllString(ident, "${1}_${2}") ident = wordBoundary1.ReplaceAllString(ident, "${1}_${2}") ident = wordBoundary2.ReplaceAllString(ident, "${1}_${2}") ident = wordBoundary3.ReplaceAllString(ident, "${1}_${2}") return strings.ToUpper(ident) }<|fim▁end|>
tagType = "END_" case bytes.HasSuffix(text, []byte("/>")): tagType = ""
<|file_name|>RobotData.java<|end_file_name|><|fim▁begin|>package frc.team5333.lib; import java.util.HashMap; /** * A static class that contains all kinds of Launch data for the robot, * such as network ports, current state and more * * @author Jaci */ public class RobotData { /**<|fim▁hole|> */ public static HashMap<String, Object> blackboard = new HashMap<String, Object>(); }<|fim▁end|>
* A blackboard containing objects that are common throughout the * program, along with their String Identifier
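The RobotData row above sketches a blackboard: one shared, string-keyed registry visible to the whole program. The same idea in a few lines of Python (hypothetical keys, for illustration only):

# Shared blackboard: any module may publish or read values by string id.
blackboard = {}

blackboard['network.port'] = 5333          # publisher side
port = blackboard.get('network.port')      # consumer side, None if absent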
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>from pyd.support import setup, Extension, pydexe_sanity_check pydexe_sanity_check() projName = 'interpcontext' setup( name=projName, version='1.0',<|fim▁hole|> build_deimos=True, d_lump=True ) ], )<|fim▁end|>
ext_modules=[ Extension(projName, ['interpcontext.d'],
<|file_name|>ExtendedToolTip.java<|end_file_name|><|fim▁begin|>/* * aTunes * Copyright (C) Alex Aranda, Sylvain Gaudard and contributors * * See http://www.atunes.org/wiki/index.php?title=Contributing for information about contributors * * http://www.atunes.org * http://sourceforge.net/projects/atunes * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ package net.sourceforge.atunes.gui.views.dialogs; import java.awt.GridBagConstraints; import java.awt.GridBagLayout; import java.awt.GridLayout; import java.awt.Insets; import javax.swing.ImageIcon; import javax.swing.JLabel; import javax.swing.JPanel; import javax.swing.JScrollPane; import javax.swing.ScrollPaneConstants; import net.sourceforge.atunes.Constants; import net.sourceforge.atunes.gui.views.controls.AbstractCustomWindow; import net.sourceforge.atunes.gui.views.controls.FadeInPanel; import net.sourceforge.atunes.model.IControlsBuilder; import net.sourceforge.atunes.utils.ImageUtils; /** * The Class ExtendedToolTip. This is a special window shown as tooltip for * navigator tree objects */ public final class ExtendedToolTip extends AbstractCustomWindow { private static final long serialVersionUID = -5041702404982493070L; <|fim▁hole|> private final JLabel line2; private final JLabel line3; /** * Instantiates a new extended tool tip. * * @param controlsBuilder * @param width * @param height */ public ExtendedToolTip(final IControlsBuilder controlsBuilder, final int width, final int height) { super(null, width, height, controlsBuilder); setFocusableWindowState(false); JPanel container = new JPanel(new GridBagLayout()); this.image = new JLabel(); this.imagePanel = new FadeInPanel(); this.imagePanel.setLayout(new GridLayout(1, 1)); this.imagePanel.add(this.image); this.line1 = new JLabel(); this.line2 = new JLabel(); this.line3 = new JLabel(); GridBagConstraints c = new GridBagConstraints(); c.gridx = 0; c.gridy = 0; c.gridheight = 3; c.insets = new Insets(0, 5, 0, 0); container.add(this.imagePanel, c); c.gridx = 1; c.gridheight = 1; c.weightx = 1; c.anchor = GridBagConstraints.WEST; // c.fill = GridBagConstraints.HORIZONTAL; c.insets = new Insets(10, 10, 0, 10); container.add(this.line1, c); c.gridx = 1; c.gridy = 1; c.insets = new Insets(0, 10, 0, 10); container.add(this.line2, c); c.gridx = 1; c.gridy = 2; c.weighty = 1; c.anchor = GridBagConstraints.NORTHWEST; c.insets = new Insets(0, 10, 0, 10); container.add(this.line3, c); // Use scroll pane to draw a border consistent with look and feel JScrollPane scrollPane = controlsBuilder.createScrollPane(container); scrollPane .setHorizontalScrollBarPolicy(ScrollPaneConstants.HORIZONTAL_SCROLLBAR_NEVER); scrollPane .setVerticalScrollBarPolicy(ScrollPaneConstants.VERTICAL_SCROLLBAR_NEVER); add(scrollPane); } /** * Sets the text of line 1 * * @param text * */ public void setLine1(final String text) { this.line1.setText(text); } /** * Sets the text of line 2 * * @param text * */ public void setLine2(final String text) { this.line2.setText(text); } /** * Sets the image * * @param img * the new image */ public void setImage(final ImageIcon img) { if (img != 
null) { // Add 50 to width to force images to fit height of tool tip as much // as possible this.image.setIcon(ImageUtils.scaleImageBicubic(img.getImage(), Constants.TOOLTIP_IMAGE_WIDTH + 50, Constants.TOOLTIP_IMAGE_HEIGHT)); this.imagePanel.setVisible(true); } else { this.image.setIcon(null); this.imagePanel.setVisible(false); } } /** * Sets the text of line 3 * * @param text * */ public void setLine3(final String text) { this.line3.setText(text); } }<|fim▁end|>
private final FadeInPanel imagePanel; private final JLabel image; private final JLabel line1;
<|file_name|>builders.rs<|end_file_name|><|fim▁begin|>// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. ///! Many builders are available to easily create different types of arrow arrays extern crate arrow; use arrow::array::Int32Builder; fn main() { // Primitive Arrays // // Primitive arrays are arrays of fixed-width primitive types (bool, u8, u16, u32, // u64, i8, i16, i32, i64, f32, f64) // Create a new builder with a capacity of 100 let mut primitive_array_builder = Int32Builder::new(100); // Append an individual primitive value primitive_array_builder.append_value(55).unwrap(); // Append a null value primitive_array_builder.append_null().unwrap(); // Append a slice of primitive values primitive_array_builder.append_slice(&[39, 89, 12]).unwrap(); // Build the `PrimitiveArray`<|fim▁hole|><|fim▁end|>
let _primitive_array = primitive_array_builder.finish(); }
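The Rust builder sequence above (append_value, append_null, append_slice, then finish) has a compact equivalent in the Python Arrow bindings; a sketch assuming pyarrow is installed:

import pyarrow as pa

# Build the same five-slot Int32 array; None entries become nulls,
# so no explicit append_null step is needed.
arr = pa.array([55, None, 39, 89, 12], type=pa.int32())
assert arr.null_count == 1
assert len(arr) == 5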
<|file_name|>builder_test.go<|end_file_name|><|fim▁begin|>// +build !windows package iso import ( "context" "fmt" "reflect" "strconv" "testing" "github.com/hashicorp/packer-plugin-sdk/common" "github.com/hashicorp/packer-plugin-sdk/multistep" packersdk "github.com/hashicorp/packer-plugin-sdk/packer" hypervcommon "github.com/hashicorp/packer/builder/hyperv/common" ) func testConfig() map[string]interface{} { return map[string]interface{}{ "iso_checksum": "md5:0B0F137F17AC10944716020B018F8126", "iso_url": "http://www.packer.io", "shutdown_command": "yes", "ssh_username": "foo", "memory": 64, "disk_size": 256, "disk_block_size": 1, "guest_additions_mode": "none", "disk_additional_size": "50000,40000,30000", common.BuildNameConfigKey: "foo", } } func TestBuilder_ImplementsBuilder(t *testing.T) { var raw interface{} raw = &Builder{} if _, ok := raw.(packersdk.Builder); !ok { t.Error("Builder must implement builder.") } } func TestBuilderPrepare_Defaults(t *testing.T) { var b Builder config := testConfig() _, warns, err := b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err != nil { t.Fatalf("should not have error: %s", err) } if b.config.VMName != "packer-foo" { t.Errorf("bad vm name: %s", b.config.VMName) } } func TestBuilderPrepare_DiskSize(t *testing.T) { var b Builder config := testConfig() delete(config, "disk_size") _, warns, err := b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err != nil { t.Fatalf("bad err: %s", err) } if b.config.DiskSize != 40*1024 { t.Fatalf("bad size: %d", b.config.DiskSize) } config["disk_size"] = 256 b = Builder{} _, warns, err = b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err != nil { t.Fatalf("should not have error: %s", err) } if b.config.DiskSize != 256 { t.Fatalf("bad size: %d", b.config.DiskSize) } } func TestBuilderPrepare_DiskBlockSize(t *testing.T) { var b Builder config := testConfig() expected_default_block_size := uint(32) expected_min_block_size := uint(0) expected_max_block_size := uint(256) // Test default with empty disk_block_size delete(config, "disk_block_size") _, warns, err := b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err != nil { t.Fatalf("bad err: %s", err) } if b.config.DiskBlockSize != expected_default_block_size { t.Fatalf("bad default block size with empty config: %d. Expected %d", b.config.DiskBlockSize, expected_default_block_size) } test_sizes := []uint{0, 1, 32, 256, 512, 1 * 1024, 32 * 1024} for _, test_size := range test_sizes { config["disk_block_size"] = test_size b = Builder{} _, warns, err = b.Prepare(config) if test_size > expected_max_block_size || test_size < expected_min_block_size { if len(warns) > 0 { t.Fatalf("bad, should have no warns: %#v", warns) } if err == nil { t.Fatalf("bad, should have error. disk_block_size=%d outside expected valid range [%d,%d]", test_size, expected_min_block_size, expected_max_block_size) } } else { if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err != nil { t.Fatalf("bad, should not have error: %s", err) } if test_size == 0 { if b.config.DiskBlockSize != expected_default_block_size { t.Fatalf("bad default block size with 0 value config: %d. Expected: %d", b.config.DiskBlockSize, expected_default_block_size) } } else { if b.config.DiskBlockSize != test_size { t.Fatalf("bad block size with 0 value config: %d. 
Expected: %d", b.config.DiskBlockSize, expected_default_block_size) } } } } } func TestBuilderPrepare_FixedVHDFormat(t *testing.T) { var b Builder config := testConfig() config["use_fixed_vhd_format"] = true config["generation"] = 1 config["skip_compaction"] = true config["differencing_disk"] = false // use_fixed_vhd_format should work with generation = 1, skip_compaction // = true, and differencing_disk = false _, warns, err := b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err != nil { t.Fatalf("bad err: %s", err) } //use_fixed_vhd_format should not work with differencing_disk = true config["differencing_disk"] = true b = Builder{} _, warns, err = b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err == nil { t.Fatal("should have error") } config["differencing_disk"] = false //use_fixed_vhd_format should not work with skip_compaction = false config["skip_compaction"] = false b = Builder{} _, warns, err = b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err == nil { t.Fatal("should have error") } config["skip_compaction"] = true //use_fixed_vhd_format should not work with generation = 2 config["generation"] = 2 b = Builder{} _, warns, err = b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err == nil { t.Fatal("should have error") } } func TestBuilderPrepare_FloppyFiles(t *testing.T) { var b Builder config := testConfig() delete(config, "floppy_files") _, warns, err := b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err != nil { t.Fatalf("bad err: %s", err) } if len(b.config.FloppyFiles) != 0 { t.Fatalf("bad: %#v", b.config.FloppyFiles) } floppiesPath := "../../test-fixtures/floppies" config["floppy_files"] = []string{fmt.Sprintf("%s/bar.bat", floppiesPath), fmt.Sprintf("%s/foo.ps1", floppiesPath)} b = Builder{} _, warns, err = b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err != nil { t.Fatalf("should not have error: %s", err) } expected := []string{fmt.Sprintf("%s/bar.bat", floppiesPath), fmt.Sprintf("%s/foo.ps1", floppiesPath)} if !reflect.DeepEqual(b.config.FloppyFiles, expected) { t.Fatalf("bad: %#v", b.config.FloppyFiles) } } func TestBuilderPrepare_InvalidFloppies(t *testing.T) { var b Builder config := testConfig() config["floppy_files"] = []string{"nonexistent.bat", "nonexistent.ps1"} b = Builder{} _, _, errs := b.Prepare(config) if errs == nil { t.Fatalf("Nonexistent floppies should trigger multierror") } if len(errs.(*packersdk.MultiError).Errors) != 2 { t.Fatalf("Multierror should work and report 2 errors") } } func TestBuilderPrepare_InvalidKey(t *testing.T) { var b Builder config := testConfig() // Add a random key config["i_should_not_be_valid"] = true _, warns, err := b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err == nil { t.Fatal("should have error") } } func TestBuilderPrepare_ISOChecksum(t *testing.T) { var b Builder config := testConfig() // Test bad config["iso_checksum"] = "" _, warns, err := b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err == nil { t.Fatal("should have error") } // Test invalid checksum config["iso_checksum"] = "FOo" _, warns, err = b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err == nil { t.Fatal("should have error") } // Test good config["iso_checksum"] = "md5:0B0F137F17AC10944716020B018F8126" b = Builder{} _, warns, err = b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err != nil { t.Fatalf("should not have 
error: %s", err) } } func TestBuilderPrepare_ISOChecksumType(t *testing.T) { var b Builder config := testConfig() config["iso_checksum"] = "0B0F137F17AC10944716020B018F8126" _, warns, err := b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err != nil { t.Fatalf("should not have error: %s", err) } // Test good config["iso_checksum"] = "mD5:0B0F137F17AC10944716020B018F8126" b = Builder{} _, warns, err = b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err != nil { t.Fatalf("should not have error: %s", err) } // Test unknown config["iso_checksum"] = "fake:foo" b = Builder{} _, warns, err = b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err == nil { t.Log("should error in prepare but go-getter doesn't let us validate yet. This will fail before dl.") } // Test none config["iso_checksum"] = "none" b = Builder{} _, warns, err = b.Prepare(config)<|fim▁hole|> if err != nil { t.Fatalf("should not have error: %s", err) } } func TestBuilderPrepare_ISOUrl(t *testing.T) { var b Builder config := testConfig() delete(config, "iso_url") delete(config, "iso_urls") // Test both empty config["iso_url"] = "" b = Builder{} _, warns, err := b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err == nil { t.Fatal("should have error") } // Test iso_url set config["iso_url"] = "http://www.packer.io" b = Builder{} _, warns, err = b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err != nil { t.Errorf("should not have error: %s", err) } expected := []string{"http://www.packer.io"} if !reflect.DeepEqual(b.config.ISOUrls, expected) { t.Fatalf("bad: %#v", b.config.ISOUrls) } // Test both set config["iso_url"] = "http://www.packer.io" config["iso_urls"] = []string{"http://www.packer.io"} b = Builder{} _, warns, err = b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err == nil { t.Fatal("should have error") } // Test just iso_urls set delete(config, "iso_url") config["iso_urls"] = []string{ "http://www.packer.io", "http://www.hashicorp.com", } b = Builder{} _, warns, err = b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err != nil { t.Errorf("should not have error: %s", err) } expected = []string{ "http://www.packer.io", "http://www.hashicorp.com", } if !reflect.DeepEqual(b.config.ISOUrls, expected) { t.Fatalf("bad: %#v", b.config.ISOUrls) } } func TestBuilderPrepare_SizeNotRequiredWhenUsingExistingHarddrive(t *testing.T) { var b Builder config := testConfig() delete(config, "iso_url") delete(config, "iso_urls") delete(config, "disk_size") config["disk_size"] = 1 // Test just iso_urls set but with vhdx delete(config, "iso_url") config["iso_urls"] = []string{ "http://www.packer.io/hdd.vhdx", "http://www.hashicorp.com/dvd.iso", } b = Builder{} _, warns, err := b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err != nil { t.Errorf("should not have error: %s", err) } expected := []string{ "http://www.packer.io/hdd.vhdx", "http://www.hashicorp.com/dvd.iso", } if !reflect.DeepEqual(b.config.ISOUrls, expected) { t.Fatalf("bad: %#v", b.config.ISOUrls) } // Test just iso_urls set but with vhd delete(config, "iso_url") config["iso_urls"] = []string{ "http://www.packer.io/hdd.vhd", "http://www.hashicorp.com/dvd.iso", } b = Builder{} _, warns, err = b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err != nil { t.Errorf("should not have error: %s", err) } expected = []string{ "http://www.packer.io/hdd.vhd", 
"http://www.hashicorp.com/dvd.iso", } if !reflect.DeepEqual(b.config.ISOUrls, expected) { t.Fatalf("bad: %#v", b.config.ISOUrls) } } func TestBuilderPrepare_SizeIsRequiredWhenNotUsingExistingHarddrive(t *testing.T) { var b Builder config := testConfig() delete(config, "iso_url") delete(config, "iso_urls") delete(config, "disk_size") config["disk_size"] = 1 // Test just iso_urls set but with vhdx delete(config, "iso_url") config["iso_urls"] = []string{ "http://www.packer.io/os.iso", "http://www.hashicorp.com/dvd.iso", } b = Builder{} _, warns, err := b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err == nil { t.Errorf("should have error") } expected := []string{ "http://www.packer.io/os.iso", "http://www.hashicorp.com/dvd.iso", } if !reflect.DeepEqual(b.config.ISOUrls, expected) { t.Fatalf("bad: %#v", b.config.ISOUrls) } } func TestBuilderPrepare_MaximumOfSixtyFourAdditionalDisks(t *testing.T) { var b Builder config := testConfig() disks := make([]string, 65) for i := range disks { disks[i] = strconv.Itoa(i) } config["disk_additional_size"] = disks b = Builder{} _, warns, err := b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err == nil { t.Errorf("should have error") } } func TestBuilderPrepare_CommConfig(t *testing.T) { // Test Winrm { config := testConfig() config["communicator"] = "winrm" config["winrm_username"] = "username" config["winrm_password"] = "password" config["winrm_host"] = "1.2.3.4" var b Builder _, warns, err := b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err != nil { t.Fatalf("should not have error: %s", err) } if b.config.Comm.WinRMUser != "username" { t.Errorf("bad winrm_username: %s", b.config.Comm.WinRMUser) } if b.config.Comm.WinRMPassword != "password" { t.Errorf("bad winrm_password: %s", b.config.Comm.WinRMPassword) } if host := b.config.Comm.Host(); host != "1.2.3.4" { t.Errorf("bad host: %s", host) } } // Test SSH { config := testConfig() config["communicator"] = "ssh" config["ssh_username"] = "username" config["ssh_password"] = "password" config["ssh_host"] = "1.2.3.4" var b Builder _, warns, err := b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err != nil { t.Fatalf("should not have error: %s", err) } if b.config.Comm.SSHUsername != "username" { t.Errorf("bad ssh_username: %s", b.config.Comm.SSHUsername) } if b.config.Comm.SSHPassword != "password" { t.Errorf("bad ssh_password: %s", b.config.Comm.SSHPassword) } if host := b.config.Comm.Host(); host != "1.2.3.4" { t.Errorf("bad host: %s", host) } } } func TestUserVariablesInBootCommand(t *testing.T) { var b Builder config := testConfig() config[common.UserVariablesConfigKey] = map[string]string{"test-variable": "test"} config["boot_command"] = []string{"blah {{user `test-variable`}} blah"} _, warns, err := b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err != nil { t.Fatalf("should not have error: %s", err) } ui := packersdk.TestUi(t) hook := &packersdk.MockHook{} driver := &hypervcommon.DriverMock{} // Set up the state. 
state := new(multistep.BasicStateBag) state.Put("config", &b.config) state.Put("driver", driver) state.Put("hook", hook) state.Put("http_port", 0) state.Put("http_ip", "0.0.0.0") state.Put("ui", ui) state.Put("vmName", "packer-foo") step := &hypervcommon.StepTypeBootCommand{ BootCommand: b.config.FlatBootCommand(), SwitchName: b.config.SwitchName, Ctx: b.config.ctx, } ret := step.Run(context.Background(), state) if ret != multistep.ActionContinue { t.Fatalf("should not have error: %#v", ret) } } func TestBuilderPrepare_UseLegacyNetworkAdapter(t *testing.T) { var b Builder config := testConfig() // should be allowed for default config config["use_legacy_network_adapter"] = true b = Builder{} _, warns, err := b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err != nil { t.Errorf("should not have error: %s", err) } // should not be allowed for gen 2 config["generation"] = 2 b = Builder{} _, warns, err = b.Prepare(config) if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } if err == nil { t.Fatal("should have error") } }<|fim▁end|>
if len(warns) == 0 { t.Fatalf("bad: %#v", warns) }
<|file_name|>CRConfigMaterialTest.java<|end_file_name|><|fim▁begin|>/* * Copyright 2022 ThoughtWorks, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at *<|fim▁hole|> * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.thoughtworks.go.plugin.configrepo.contract.material; import com.google.gson.JsonObject; import com.thoughtworks.go.plugin.configrepo.contract.AbstractCRTest; import org.junit.jupiter.api.Test; import java.util.ArrayList; import java.util.List; import java.util.Map; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; public class CRConfigMaterialTest extends AbstractCRTest<CRConfigMaterial> { private final CRConfigMaterial named; private final CRConfigMaterial namedDest; private final CRConfigMaterial materialWithIgnores; private final CRConfigMaterial invalidList; public CRConfigMaterialTest() { named = new CRConfigMaterial("primary", null,null); namedDest = new CRConfigMaterial("primary", "folder",null); List<String> patterns = new ArrayList<>(); patterns.add("externals"); patterns.add("tools"); materialWithIgnores = new CRConfigMaterial("primary", "folder",new CRFilter(patterns,false)); CRFilter badFilter = new CRFilter(patterns,false); badFilter.setIncludesNoCheck(patterns); invalidList = new CRConfigMaterial("primary", "folder",badFilter); } @Override public void addGoodExamples(Map<String, CRConfigMaterial> examples) { examples.put("namedExample", named); examples.put("namedDest", namedDest); examples.put("ignoreFilter", materialWithIgnores); } @Override public void addBadExamples(Map<String, CRConfigMaterial> examples) { examples.put("invalidList",invalidList); } @Test public void shouldAppendTypeFieldWhenSerializingMaterials() { CRMaterial value = named; JsonObject jsonObject = (JsonObject)gson.toJsonTree(value); assertThat(jsonObject.get("type").getAsString(), is(CRConfigMaterial.TYPE_NAME)); } @Test public void shouldHandlePolymorphismWhenDeserializing() { CRMaterial value = named; String json = gson.toJson(value); CRConfigMaterial deserializedValue = (CRConfigMaterial)gson.fromJson(json,CRMaterial.class); assertThat("Deserialized value should equal to value before serialization", deserializedValue,is(value)); } }<|fim▁end|>
* http://www.apache.org/licenses/LICENSE-2.0
<|file_name|>cl_todo_category.py<|end_file_name|><|fim▁begin|># -*- encoding: utf-8 -*- ################################################################################ # # # Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU Affero General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU Affero General Public License for more details. # # # # You should have received a copy of the GNU Affero General Public License # # along with this program. If not, see <http://www.gnu.org/licenses/>. # ################################################################################ from openerp import models, fields, api class Category(models.Model): _name = 'cl_todo.task.category' name = fields.Char('Category', required=True, size=128, translate=True) parent_id = fields.Many2one('cl_todo.task.category', 'Parent Category', select=True, ondelete='restrict') code = fields.Char('Code', size=128, required=False) description = fields.Char(string='Description', size=256) notes = fields.Text(string='Notes') complete_name = fields.Char(string='Full Category', compute='_name_get_fnc', store=False, readonly=True) child_ids = fields.One2many('cl_todo.task.category', 'parent_id', 'Child Categories') active = fields.Boolean('Active', help="If unchecked, it will allow you to hide the category without removing it.", default=1) parent_left = fields.Integer('Left parent', select=True) parent_right = fields.Integer('Right parent', select=True) todo_ids = fields.Many2many( 'cl_todo.task', 'cl_todo_task_category_rel', 'category_id', 'todo_task_id', 'Todo Tasks' ) _sql_constraints = [ ('uniq_code', 'unique(code)', "Error! The Code must be unique!"), ] _constraints = [( models.Model._check_recursion, 'Error! You can not create recursive categories.', ['parent_id'] )] _parent_store = True _parent_order = 'name' _order = 'parent_left' @api.multi def name_get(self): """Return the category's display name, including their direct parent by default. :param dict context: the ``category_display`` key can be used to select the short version of the category (without the direct parent), when set to ``'short'``. 
The default is the long version.""" if self._context is None: self._context = {} if self._context.get('category_display') == 'short': return super(Category, self).name_get() if isinstance(self._ids, (int, long)): self._ids = [self._ids] reads = self.read(['name', 'parent_id']) res = [] for record in reads:<|fim▁hole|> name = record['name'] if record['parent_id']: name = record['parent_id'][1] + ' / ' + name res.append((record['id'], name)) return res @api.model def name_search(self, name, args=None, operator='ilike', limit=100): args = args or [] if name: name = name.split(' / ')[-1] args = [('name', operator, name)] + args categories = self.search(args, limit=limit) return categories.name_get() @api.one def _name_get_fnc(self): self.refresh_complete_name = 0 complete_name = self.name_get() if complete_name: self.complete_name = complete_name[0][1] else: self.complete_name = self.name class TodoTask(models.Model): _inherit = 'cl_todo.task' category_ids = fields.Many2many( 'cl_todo.task.category', 'cl_todo_task_category_rel', 'todo_task_id', 'category_id', 'Categories' )<|fim▁end|>
<|file_name|>main.rs<|end_file_name|><|fim▁begin|><|fim▁hole|> println!("Hello World!"); }<|fim▁end|>
fn main() {
<|file_name|>notification-system.test.js<|end_file_name|><|fim▁begin|>/* global sinon */ import React, { Component } from 'react'; import TestUtils from 'react-dom/test-utils'; import { expect } from 'chai'; import NotificationSystem from 'NotificationSystem'; import { positions, levels } from 'constants'; import merge from 'object-assign'; const defaultNotification = { title: 'This is a title', message: 'This is a message', level: 'success' }; const style = { Containers: { DefaultStyle: { width: 600 }, tl: { width: 800 } } }; describe('Notification Component', function() { let node; let instance; let component; let clock; let notificationObj; const ref = 'notificationSystem'; this.timeout(10000); beforeEach(() => { // We need to create this wrapper so we can use refs class ElementWrapper extends Component { render() { return <NotificationSystem ref={ ref } style={ style } allowHTML={ true } noAnimation={ true } />; } } node = window.document.createElement('div'); instance = TestUtils.renderIntoDocument(React.createElement(ElementWrapper), node); component = instance.refs[ref]; notificationObj = merge({}, defaultNotification); clock = sinon.useFakeTimers(); }); afterEach(() => { clock.restore(); }); it('should be rendered', done => { component = TestUtils.findRenderedDOMComponentWithClass(instance, 'notifications-wrapper'); expect(component).to.not.be.null; done(); }); it('should hold the component ref', done => { expect(component).to.not.be.null; done(); }); it('should render a single notification', done => { component.addNotification(defaultNotification); let notification = TestUtils.scryRenderedDOMComponentsWithClass(instance, 'notification'); expect(notification.length).to.equal(1); done(); }); it('should not set a notification visibility class when the notification is initially added', done => { component.addNotification(defaultNotification); let notification = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification'); expect(notification.className).to.not.match(/notification-hidden/); expect(notification.className).to.not.match(/notification-visible/); done(); }); it('should set the notification class to visible after added', done => { component.addNotification(defaultNotification); let notification = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification'); expect(notification.className).to.match(/notification/); clock.tick(400); expect(notification.className).to.match(/notification-visible/); done(); }); it('should add additional classes to the notification if specified', done => { component.addNotification(Object.assign({},defaultNotification, {className: 'FOO'})); let notification = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification'); expect(notification.className).to.contain(' FOO'); done(); }); it('should render notifications in all positions with all levels', done => { let count = 0; for (let position of Object.keys(positions)) { for (let level of Object.keys(levels)) { notificationObj.position = positions[position]; notificationObj.level = levels[level]; component.addNotification(notificationObj); count++; } } let containers = []; for (let position of Object.keys(positions)) { containers.push(TestUtils.findRenderedDOMComponentWithClass(instance, 'notifications-' + positions[position])); } containers.forEach(function(container) { for (let level of Object.keys(levels)) { let notification = container.getElementsByClassName('notification-' + levels[level]); expect(notification).to.not.be.null; } }); let notifications = 
TestUtils.scryRenderedDOMComponentsWithClass(instance, 'notification'); expect(notifications.length).to.equal(count); done(); }); it('should render multiple notifications', done => { const randomNumber = Math.floor(Math.random(5, 10)); for (let i = 1; i <= randomNumber; i++) { component.addNotification(defaultNotification); } let notifications = TestUtils.scryRenderedDOMComponentsWithClass(instance, 'notification'); expect(notifications.length).to.equal(randomNumber); done(); }); it('should not render notifications with the same uid', done => { notificationObj.uid = 500; component.addNotification(notificationObj); component.addNotification(notificationObj); let notification = TestUtils.scryRenderedDOMComponentsWithClass(instance, 'notification'); expect(notification.length).to.equal(1); done(); }); it('should remove a notification after autoDismiss', function(done) { notificationObj.autoDismiss = 2; component.addNotification(notificationObj); clock.tick(3000); let notification = TestUtils.scryRenderedDOMComponentsWithClass(instance, 'notification'); expect(notification.length).to.equal(0); done(); }); it('should remove a notification using returned object', done => { let notificationCreated = component.addNotification(defaultNotification); let notification = TestUtils.scryRenderedDOMComponentsWithClass(instance, 'notification'); expect(notification.length).to.equal(1); component.removeNotification(notificationCreated); clock.tick(1000); let notificationRemoved = TestUtils.scryRenderedDOMComponentsWithClass(instance, 'notification'); expect(notificationRemoved.length).to.equal(0); done(); }); it('should remove a notification using uid', done => { let notificationCreated = component.addNotification(defaultNotification); let notification = TestUtils.scryRenderedDOMComponentsWithClass(instance, 'notification'); expect(notification.length).to.equal(1); component.removeNotification(notificationCreated.uid); clock.tick(200); let notificationRemoved = TestUtils.scryRenderedDOMComponentsWithClass(instance, 'notification'); expect(notificationRemoved.length).to.equal(0); done(); }); it('should edit an existing notification using returned object', (done) => { const notificationCreated = component.addNotification(defaultNotification); const notification = TestUtils.scryRenderedDOMComponentsWithClass(instance, 'notification'); expect(notification.length).to.equal(1); const newTitle = 'foo'; const newContent = 'foobar'; component.editNotification(notificationCreated, { title: newTitle, message: newContent }); clock.tick(1000); const notificationEdited = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification'); expect(notificationEdited.getElementsByClassName('notification-title')[0].textContent).to.equal(newTitle); expect(notificationEdited.getElementsByClassName('notification-message')[0].textContent).to.equal(newContent); done(); }); it('should edit an existing notification using uid', (done) => { const notificationCreated = component.addNotification(defaultNotification); const notification = TestUtils.scryRenderedDOMComponentsWithClass(instance, 'notification'); expect(notification.length).to.equal(1); const newTitle = 'foo'; const newContent = 'foobar'; component.editNotification(notificationCreated.uid, { title: newTitle, message: newContent }); clock.tick(1000); const notificationEdited = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification'); expect(notificationEdited.getElementsByClassName('notification-title')[0].textContent).to.equal(newTitle); 
expect(notificationEdited.getElementsByClassName('notification-message')[0].textContent).to.equal(newContent); done(); }); it('should remove all notifications', done => { component.addNotification(defaultNotification); component.addNotification(defaultNotification); component.addNotification(defaultNotification); let notification = TestUtils.scryRenderedDOMComponentsWithClass(instance, 'notification'); expect(notification.length).to.equal(3); component.clearNotifications(); clock.tick(200); let notificationRemoved = TestUtils.scryRenderedDOMComponentsWithClass(instance, 'notification'); expect(notificationRemoved.length).to.equal(0); done(); }); it('should dismiss notification on click', done => { component.addNotification(notificationObj); let notification = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification'); TestUtils.Simulate.click(notification); clock.tick(1000); let notificationRemoved = TestUtils.scryRenderedDOMComponentsWithClass(instance, 'notification'); expect(notificationRemoved.length).to.equal(0); done(); }); it('should dismiss notification on click of dismiss button', done => { component.addNotification(notificationObj); let dismissButton = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification-dismiss'); TestUtils.Simulate.click(dismissButton); clock.tick(1000); let notificationRemoved = TestUtils.scryRenderedDOMComponentsWithClass(instance, 'notification'); expect(notificationRemoved.length).to.equal(0); done(); }); it('should not render title if not provided', done => { delete notificationObj.title; component.addNotification(notificationObj); let notification = TestUtils.scryRenderedDOMComponentsWithClass(instance, 'notification-title'); expect(notification.length).to.equal(0);<|fim▁hole|> delete notificationObj.message; component.addNotification(notificationObj); let notification = TestUtils.scryRenderedDOMComponentsWithClass(instance, 'notification-message'); expect(notification.length).to.equal(0); done(); }); it('should not dismiss the notification on click if dismissible is false', done => { notificationObj.dismissible = false; component.addNotification(notificationObj); let notification = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification'); TestUtils.Simulate.click(notification); let notificationAfterClicked = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification'); expect(notificationAfterClicked).to.not.be.null; done(); }); it('should not dismiss the notification on click if dismissible is none', done => { notificationObj.dismissible = 'none'; component.addNotification(notificationObj); let notification = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification'); TestUtils.Simulate.click(notification); let notificationAfterClicked = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification'); expect(notificationAfterClicked).to.exist; done(); }); it('should not dismiss the notification on click if dismissible is button', done => { notificationObj.dismissible = 'button'; component.addNotification(notificationObj); let notification = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification'); TestUtils.Simulate.click(notification); let notificationAfterClicked = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification'); expect(notificationAfterClicked).to.exist; done(); }); it('should render a button if action property is passed', done => { defaultNotification.action = { label: 'Click me', callback: function() {} };
component.addNotification(defaultNotification); let button = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification-action-button'); expect(button).to.not.be.null; done(); }); it('should execute a callback function when notification button is clicked', done => { let testThis = false; notificationObj.action = { label: 'Click me', callback: function() { testThis = true; } }; component.addNotification(notificationObj); let button = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification-action-button'); TestUtils.Simulate.click(button); expect(testThis).to.equal(true); done(); }); it('should accept an action without callback function defined', done => { notificationObj.action = { label: 'Click me' }; component.addNotification(notificationObj); let button = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification-action-button'); TestUtils.Simulate.click(button); let notification = TestUtils.scryRenderedDOMComponentsWithClass(instance, 'notification'); expect(notification.length).to.equal(0); done(); }); it('should execute a callback function on add a notification', done => { let testThis = false; notificationObj.onAdd = function() { testThis = true; }; component.addNotification(notificationObj); expect(testThis).to.equal(true); done(); }); it('should execute a callback function on remove a notification', done => { let testThis = false; notificationObj.onRemove = function() { testThis = true; }; component.addNotification(notificationObj); let notification = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification'); TestUtils.Simulate.click(notification); expect(testThis).to.equal(true); done(); }); it('should render children if passed', done => { defaultNotification.children = ( <div className="custom-container"></div> ); component.addNotification(defaultNotification); let customContainer = TestUtils.findRenderedDOMComponentWithClass(instance, 'custom-container'); expect(customContainer).to.not.be.null; done(); }); it('should pause the timer if a notification has a mouse enter', done => { notificationObj.autoDismiss = 2; component.addNotification(notificationObj); let notification = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification'); TestUtils.Simulate.mouseEnter(notification); clock.tick(4000); let _notification = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification'); expect(_notification).to.not.be.null; done(); }); it('should resume the timer if a notification has a mouse leave', done => { notificationObj.autoDismiss = 2; component.addNotification(notificationObj); let notification = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification'); TestUtils.Simulate.mouseEnter(notification); clock.tick(800); TestUtils.Simulate.mouseLeave(notification); clock.tick(2000); let _notification = TestUtils.scryRenderedDOMComponentsWithClass(instance, 'notification'); expect(_notification.length).to.equal(0); done(); }); it('should allow HTML inside messages', done => { defaultNotification.message = '<strong class="allow-html-strong">Strong</strong>'; component.addNotification(defaultNotification); let notification = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification-message'); let htmlElement = notification.getElementsByClassName('allow-html-strong'); expect(htmlElement.length).to.equal(1); done(); }); it('should render containers with an overridden width', done => { notificationObj.position = 'tc'; component.addNotification(notificationObj); let notification =
TestUtils.findRenderedDOMComponentWithClass(instance, 'notifications-tc'); let width = notification.style.width; expect(width).to.equal('600px'); done(); }); it('should render a notification with specific style based on position', done => { notificationObj.position = 'bc'; component.addNotification(notificationObj); let notification = TestUtils.findRenderedDOMComponentWithClass(instance, 'notification'); let bottomPosition = notification.style.bottom; expect(bottomPosition).to.equal('-100px'); done(); }); it('should render containers with an overridden width for a specific position', done => { notificationObj.position = 'tl'; component.addNotification(notificationObj); let notification = TestUtils.findRenderedDOMComponentWithClass(instance, 'notifications-tl'); let width = notification.style.width; expect(width).to.equal('800px'); done(); }); it('should throw an error if no level is defined', done => { delete notificationObj.level; expect(() => component.addNotification(notificationObj)).to.throw(/notification level is required/); done(); }); it('should throw an error if an invalid level is defined', done => { notificationObj.level = 'invalid'; expect(() => component.addNotification(notificationObj)).to.throw(/is not a valid level/); done(); }); it('should throw an error if an invalid position is defined', done => { notificationObj.position = 'invalid'; expect(() => component.addNotification(notificationObj)).to.throw(/is not a valid position/); done(); }); it('should throw an error if autoDismiss is not a number', done => { notificationObj.autoDismiss = 'string'; expect(() => component.addNotification(notificationObj)).to.throw(/\'autoDismiss\' must be a number./); done(); }); it('should render 2nd notification below 1st one', done => { component.addNotification(merge({}, defaultNotification, {title: '1st'})); component.addNotification(merge({}, defaultNotification, {title: '2nd'})); const notifications = TestUtils.scryRenderedDOMComponentsWithClass(instance, 'notification'); expect(notifications[0].getElementsByClassName('notification-title')[0].textContent).to.equal('1st'); expect(notifications[1].getElementsByClassName('notification-title')[0].textContent).to.equal('2nd'); done(); }); }); describe('Notification Component with newOnTop=true', function() { let node; let instance; let component; let clock; let notificationObj; const ref = 'notificationSystem'; this.timeout(10000); beforeEach(() => { // We need to create this wrapper so we can use refs class ElementWrapper extends Component { render() { return <NotificationSystem ref={ ref } style={ style } allowHTML={ true } noAnimation={ true } newOnTop={ true } />; } } node = window.document.createElement("div"); instance = TestUtils.renderIntoDocument(React.createElement(ElementWrapper), node); component = instance.refs[ref]; notificationObj = merge({}, defaultNotification); clock = sinon.useFakeTimers(); }); afterEach(() => { clock.restore(); }); it('should render 2nd notification above 1st one', done => { component.addNotification(merge({}, defaultNotification, {title: '1st'})); component.addNotification(merge({}, defaultNotification, {title: '2nd'})); const notifications = TestUtils.scryRenderedDOMComponentsWithClass(instance, 'notification'); expect(notifications[0].getElementsByClassName('notification-title')[0].textContent).to.equal('2nd'); expect(notifications[1].getElementsByClassName('notification-title')[0].textContent).to.equal('1st'); done(); }); });<|fim▁end|>
done(); }); it('should not render message if not provided', done => {
<|file_name|>DigitalEcho.cpp<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
#include "RhythmComposerOS.h"
<|file_name|>output_writers_end_to_end_test.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # # Copyright 2011 Google Inc. All Rights Reserved. import unittest from google.appengine.api import files from google.appengine.ext import db from mapreduce import control from mapreduce import model from mapreduce import output_writers from mapreduce import test_support from testlib import testutil BLOBSTORE_WRITER_NAME = (output_writers.__name__ + "." + output_writers.BlobstoreOutputWriter.__name__) FILE_WRITER_NAME = (output_writers.__name__ + "." + output_writers.FileOutputWriter.__name__) class TestEntity(db.Model): """Test entity class.""" def test_handler_yield_key_str(entity): """Test handler which yields entity key.""" yield str(entity.key()) + "\n" class FileOutputWriterEndToEndTest(testutil.HandlerTestBase): """End-to-end tests for FileOutputWriter using googlestore.""" def testSingleShard(self): entity_count = 1000 for _ in range(entity_count): TestEntity().put() mapreduce_id = control.start_map( "test_map", __name__ + ".test_handler_yield_key_str", "mapreduce.input_readers.DatastoreInputReader", { "entity_kind": __name__ + "." + TestEntity.__name__, "filesystem": "gs", "gs_bucket_name": "bucket" }, shard_count=4, base_path="/mapreduce_base_path", output_writer_spec=FILE_WRITER_NAME) test_support.execute_until_empty(self.taskqueue) mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id) filenames = output_writers.FileOutputWriter.get_filenames(mapreduce_state) self.assertEqual(1, len(filenames)) self.assertTrue(filenames[0].startswith("/gs/bucket/")) with files.open(filenames[0], "r") as f: data = f.read(10000000) self.assertEquals(1000, len(data.strip().split("\n"))) def testDedicatedParams(self): entity_count = 1000 for _ in range(entity_count): TestEntity().put() mapreduce_id = control.start_map( "test_map", __name__ + ".test_handler_yield_key_str", "mapreduce.input_readers.DatastoreInputReader", { "input_reader": { "entity_kind": __name__ + "." + TestEntity.__name__, }, "output_writer": { "filesystem": "gs", "gs_bucket_name": "bucket", }, }, shard_count=4, base_path="/mapreduce_base_path", output_writer_spec=FILE_WRITER_NAME) test_support.execute_until_empty(self.taskqueue) mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id) filenames = output_writers.FileOutputWriter.get_filenames(mapreduce_state) self.assertEqual(1, len(filenames)) self.assertTrue(filenames[0].startswith("/gs/bucket/")) <|fim▁hole|> data = f.read(10000000) self.assertEquals(1000, len(data.strip().split("\n"))) def testMultipleShards(self): entity_count = 1000 for _ in range(entity_count): TestEntity().put() mapreduce_id = control.start_map( "test_map", __name__ + ".test_handler_yield_key_str", "mapreduce.input_readers.DatastoreInputReader", { "entity_kind": __name__ + "." 
+ TestEntity.__name__, "output_sharding": "input", "filesystem": "gs", }, shard_count=4, base_path="/mapreduce_base_path", output_writer_spec=BLOBSTORE_WRITER_NAME) test_support.execute_until_empty(self.taskqueue) mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id) filenames = output_writers.BlobstoreOutputWriter.get_filenames( mapreduce_state) self.assertEqual(4, len(filenames)) file_lengths = [] for filename in filenames: self.assertTrue(filename.startswith("/blobstore/")) self.assertFalse(filename.startswith("/blobstore/writable:")) with files.open(filename, "r") as f: data = f.read(10000000) file_lengths.append(len(data.strip().split("\n"))) # these numbers are totally random and depend on our sharding, # which is quite deterministic. expected_lengths = [199, 210, 275, 316] self.assertEqual(1000, sum(expected_lengths)) self.assertEquals(expected_lengths, file_lengths) class BlobstoreOutputWriterEndToEndTest(testutil.HandlerTestBase): """End-to-end tests for BlobstoreOutputWriter. BlobstoreOutputWriter isn't complex enough yet to do extensive unit tests. Do end-to-end tests just to check that it works. """ def testSingleShard(self): entity_count = 1000 for _ in range(entity_count): TestEntity().put() mapreduce_id = control.start_map( "test_map", __name__ + ".test_handler_yield_key_str", "mapreduce.input_readers.DatastoreInputReader", { "entity_kind": __name__ + "." + TestEntity.__name__, }, shard_count=4, base_path="/mapreduce_base_path", output_writer_spec=BLOBSTORE_WRITER_NAME) test_support.execute_until_empty(self.taskqueue) mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id) filenames = output_writers.BlobstoreOutputWriter.get_filenames( mapreduce_state) self.assertEqual(1, len(filenames)) blob_name = filenames[0] self.assertTrue(blob_name.startswith("/blobstore/")) self.assertFalse(blob_name.startswith("/blobstore/writable:")) with files.open(blob_name, "r") as f: data = f.read(10000000) self.assertEquals(1000, len(data.strip().split("\n"))) def testMultipleShards(self): entity_count = 1000 for _ in range(entity_count): TestEntity().put() mapreduce_id = control.start_map( "test_map", __name__ + ".test_handler_yield_key_str", "mapreduce.input_readers.DatastoreInputReader", { "entity_kind": __name__ + "." + TestEntity.__name__, "output_sharding": "input", }, shard_count=4, base_path="/mapreduce_base_path", output_writer_spec=BLOBSTORE_WRITER_NAME) test_support.execute_until_empty(self.taskqueue) mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id) filenames = output_writers.BlobstoreOutputWriter.get_filenames( mapreduce_state) self.assertEqual(4, len(filenames)) file_lengths = [] for filename in filenames: self.assertTrue(filename.startswith("/blobstore/")) self.assertFalse(filename.startswith("/blobstore/writable:")) with files.open(filename, "r") as f: data = f.read(10000000) file_lengths.append(len(data.strip().split("\n"))) # these numbers are totally random and depend on our sharding, # which is quite deterministic. expected_lengths = [199, 210, 275, 316] self.assertEqual(1000, sum(expected_lengths)) self.assertEquals(expected_lengths, file_lengths) if __name__ == "__main__": unittest.main()<|fim▁end|>
with files.open(filenames[0], "r") as f:
<|file_name|>_permissions_operations.py<|end_file_name|><|fim▁begin|># coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.mgmt.core.exceptions import ARMErrorFormat from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class PermissionsOperations: """PermissionsOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.authorization.v2018_01_01_preview.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def list_for_resource_group( self, resource_group_name: str, **kwargs: Any ) -> AsyncIterable["_models.PermissionGetResult"]: """Gets all permissions the caller has for a resource group. :param resource_group_name: The name of the resource group. The name is case insensitive. 
:type resource_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either PermissionGetResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.authorization.v2018_01_01_preview.models.PermissionGetResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.PermissionGetResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-01-01-preview" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_for_resource_group.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('PermissionGetResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_for_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Authorization/permissions'} # type: ignore def list_for_resource(<|fim▁hole|> parent_resource_path: str, resource_type: str, resource_name: str, **kwargs: Any ) -> AsyncIterable["_models.PermissionGetResult"]: """Gets all permissions the caller has for a resource. :param resource_group_name: The name of the resource group. The name is case insensitive. :type resource_group_name: str :param resource_provider_namespace: The namespace of the resource provider. :type resource_provider_namespace: str :param parent_resource_path: The parent resource identity. :type parent_resource_path: str :param resource_type: The resource type of the resource. :type resource_type: str :param resource_name: The name of the resource to get the permissions for. 
:type resource_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either PermissionGetResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.authorization.v2018_01_01_preview.models.PermissionGetResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.PermissionGetResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-01-01-preview" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_for_resource.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1), 'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str', skip_quote=True), 'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True), 'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True), 'resourceName': self._serialize.url("resource_name", resource_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('PermissionGetResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_for_resource.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/permissions'} # type: ignore<|fim▁end|>
self, resource_group_name: str, resource_provider_namespace: str,
<|file_name|>greek_numerals.py<|end_file_name|><|fim▁begin|># coding: utf8 # Greek Numeral Converter<|fim▁hole|># # https://en.wikipedia.org/wiki/Greek_numerals keraia = 'ʹ' # Multiply by 10000 myriads = 'Μ' numbers_to_letters = { 1: 'Α', 2: 'Β', 3: 'Γ', 4: 'Δ', 5: 'Ε', 6: 'ΣΤ', 7: 'Ζ', 8: 'Η', 9: 'Θ', 10: 'Ι', 20: 'Κ', 30: 'Λ', 40: 'Μ', 50: 'Ν', 60: 'Ξ', 70: 'Ο', 80: 'Π', 90: 'Ϟ', 100: 'Ρ', 200: 'Σ', 300: 'Τ', 400: 'Υ', 500: 'Φ', 600: 'Χ', 700: 'Ψ', 800: 'Ω', 900: 'Ϡ', 1000: '͵Α', 2000: '͵Β', 3000: '͵Γ', 4000: '͵Δ', 5000: '͵Ε', 6000: '͵Ϛ', 7000: '͵Z', 8000: '͵H', 9000: '͵Θ', } def to_greek_numeral(num): if num == 0: return '' if num in numbers_to_letters: return numbers_to_letters[num] nums = list(numbers_to_letters.keys()); nums.reverse() initial = [] for n in nums: if num - n >= 0: initial.append(numbers_to_letters[n]) num = num - n return ''.join(initial) + keraia def date_string_to_greek_number(datestring): parts = datestring.split('/') return '/'.join(list(to_greek_numeral(int(p)) for p in parts)) if __name__ == '__main__': import unittest class GreekNumericsTest(unittest.TestCase): def test_numbers(self): for i in range(1, 10): self.assertEqual(numbers_to_letters[i], to_greek_numeral(i)) tests = ( (1983, '͵ΑϠΠΓʹ'), (2017, '͵ΒΙΖʹ'), (13000, '͵Θ͵Δʹ'), (666, 'ΧΞΣΤʹ') ) for n, expected in tests: self.assertEqual(expected, to_greek_numeral(n)) unittest.main()<|fim▁end|>
<|file_name|>AWTChangeNotifier.java<|end_file_name|><|fim▁begin|>/******************************************************************************* * Copyright (c) 1998, 2012 Oracle and/or its affiliates. All rights reserved. * This program and the accompanying materials are made available under the * terms of the Eclipse Public License v1.0 and Eclipse Distribution License v. 1.0 * which accompanies this distribution. * The Eclipse Public License is available at http://www.eclipse.org/legal/epl-v10.html * and the Eclipse Distribution License is available at * http://www.eclipse.org/org/documents/edl-v10.php. * * Contributors: * Oracle - initial API and implementation from Oracle TopLink ******************************************************************************/ package org.eclipse.persistence.tools.workbench.utility.events; import java.awt.EventQueue; import java.beans.PropertyChangeEvent; import java.beans.PropertyChangeListener; import java.io.Serializable; /** * AWT-aware implementation of ChangeNotifier interface: * If we are executing on the AWT event-dispatch thread, * simply forward the change notification directly to the listener. * If we are executing on some other thread, queue up the * notification on the AWT event queue so it can be executed * on the event-dispatch thread (after the pending events have * been dispatched). */ public final class AWTChangeNotifier implements ChangeNotifier, Serializable { // singleton private static ChangeNotifier INSTANCE; private static final long serialVersionUID = 1L; /** * Return the singleton. */ public synchronized static ChangeNotifier instance() { if (INSTANCE == null) { INSTANCE = new AWTChangeNotifier(); } return INSTANCE; } /** * Ensure non-instantiability. */ private AWTChangeNotifier() { super(); } /** * @see ChangeNotifier#stateChanged(StateChangeListener, StateChangeEvent) */ public void stateChanged(final StateChangeListener listener, final StateChangeEvent event) { if (EventQueue.isDispatchThread()) { listener.stateChanged(event); } else { this.invoke( new Runnable() { public void run() { listener.stateChanged(event); } public String toString() { return "stateChanged"; } } ); } } /** * @see ChangeSupport.Notifier#propertyChange(java.beans.PropertyChangeListener, java.beans.PropertyChangeEvent) */ public void propertyChange(final PropertyChangeListener listener, final PropertyChangeEvent event) { if (EventQueue.isDispatchThread()) { listener.propertyChange(event); } else { this.invoke( new Runnable() { public void run() { listener.propertyChange(event); } public String toString() { return "propertyChange"; } } ); } } /** * @see ChangeSupport.Notifier#itemsAdded(CollectionChangeListener, CollectionChangeEvent) */ public void itemsAdded(final CollectionChangeListener listener, final CollectionChangeEvent event) { if (EventQueue.isDispatchThread()) { listener.itemsAdded(event); } else { this.invoke( new Runnable() { public void run() { listener.itemsAdded(event); } public String toString() { return "itemsAdded (Collection)"; } } ); } } /** * @see ChangeSupport.Notifier#itemsRemoved(CollectionChangeListener, CollectionChangeEvent) */ public void itemsRemoved(final CollectionChangeListener listener, final CollectionChangeEvent event) { if (EventQueue.isDispatchThread()) { listener.itemsRemoved(event); } else { this.invoke( new Runnable() { public void run() { listener.itemsRemoved(event); } public String toString() { return "itemsRemoved (Collection)"; } } ); } } /** * @see 
ChangeSupport.Notifier#collectionChanged(CollectionChangeListener, CollectionChangeEvent) */ public void collectionChanged(final CollectionChangeListener listener, final CollectionChangeEvent event) { if (EventQueue.isDispatchThread()) { listener.collectionChanged(event); } else { this.invoke( new Runnable() { public void run() { listener.collectionChanged(event); } public String toString() { return "collectionChanged"; } } ); } } /** * @see ChangeSupport.Notifier#itemsAdded(ListChangeListener, ListChangeEvent) */ public void itemsAdded(final ListChangeListener listener, final ListChangeEvent event) { if (EventQueue.isDispatchThread()) { listener.itemsAdded(event); } else { this.invoke( new Runnable() { public void run() { listener.itemsAdded(event); } public String toString() { return "itemsAdded (List)"; } } ); } } /** * @see ChangeSupport.Notifier#itemsRemoved(ListChangeListener, ListChangeEvent) */ public void itemsRemoved(final ListChangeListener listener, final ListChangeEvent event) { if (EventQueue.isDispatchThread()) { listener.itemsRemoved(event); } else { this.invoke( new Runnable() { public void run() { listener.itemsRemoved(event); } public String toString() { return "itemsRemoved (List)"; } } ); } } /** * @see ChangeSupport.Notifier#itemsReplaced(ListChangeListener, ListChangeEvent) */ public void itemsReplaced(final ListChangeListener listener, final ListChangeEvent event) { if (EventQueue.isDispatchThread()) { listener.itemsReplaced(event); } else { this.invoke( <|fim▁hole|> public void run() { listener.itemsReplaced(event); } public String toString() { return "itemsReplaced (List)"; } } ); } } /** * @see ChangeSupport.Notifier#listChanged(ListChangeListener, ListChangeEvent) */ public void listChanged(final ListChangeListener listener, final ListChangeEvent event) { if (EventQueue.isDispatchThread()) { listener.listChanged(event); } else { this.invoke( new Runnable() { public void run() { listener.listChanged(event); } public String toString() { return "listChanged"; } } ); } } /** * @see ChangeSupport.Notifier#nodeAdded(TreeChangeListener, TreeChangeEvent) */ public void nodeAdded(final TreeChangeListener listener, final TreeChangeEvent event) { if (EventQueue.isDispatchThread()) { listener.nodeAdded(event); } else { this.invoke( new Runnable() { public void run() { listener.nodeAdded(event); } public String toString() { return "nodeAdded"; } } ); } } /** * @see ChangeSupport.Notifier#nodeRemoved(TreeChangeListener, TreeChangeEvent) */ public void nodeRemoved(final TreeChangeListener listener, final TreeChangeEvent event) { if (EventQueue.isDispatchThread()) { listener.nodeRemoved(event); } else { this.invoke( new Runnable() { public void run() { listener.nodeRemoved(event); } public String toString() { return "nodeRemoved"; } } ); } } /** * @see ChangeSupport.Notifier#treeChanged(TreeChangeListener, TreeChangeEvent) */ public void treeChanged(final TreeChangeListener listener, final TreeChangeEvent event) { if (EventQueue.isDispatchThread()) { listener.treeChanged(event); } else { this.invoke( new Runnable() { public void run() { listener.treeChanged(event); } public String toString() { return "treeChanged"; } } ); } } /** * EventQueue.invokeLater(Runnable) seems to work OK; * but using #invokeAndWait() can sometimes make things * more predictable when debugging. 
*/ private void invoke(Runnable r) { EventQueue.invokeLater(r); // try { // EventQueue.invokeAndWait(r); // } catch (InterruptedException ex) { // throw new RuntimeException(ex); // } catch (java.lang.reflect.InvocationTargetException ex) { // throw new RuntimeException(ex); // } } /** * Serializable singleton support */ private Object readResolve() { return instance(); } }<|fim▁end|>
new Runnable() {
<|file_name|>GridTable.delegate.js<|end_file_name|><|fim▁begin|>/* * ! ${copyright} */ sap.ui.define([ "delegates/odata/v4/TableDelegate", "sap/ui/core/Core" ], function( TableDelegate, Core ) { "use strict"; /** * Test delegate for OData V4. */ var ODataTableDelegate = Object.assign({}, TableDelegate); /** * Updates the binding info with the relevant path and model from the metadata. * * @param {Object} oTable The MDC table instance * @param {Object} oBindingInfo The bindingInfo of the table */ ODataTableDelegate.updateBindingInfo = function(oTable, oBindingInfo) { TableDelegate.updateBindingInfo.apply(this, arguments); var oFilterBar = Core.byId(oTable.getFilter()); if (oFilterBar) { // get the basic search var sSearchText = oFilterBar.getSearch instanceof Function ? oFilterBar.getSearch() : ""; if (sSearchText && sSearchText.indexOf(" ") === -1) { // to allow search for "("..... sSearchText = '"' + sSearchText + '"'; // TODO: escape " in string } // if it contains spaces allow operators like OR... oBindingInfo.parameters.$search = sSearchText || undefined;<|fim▁hole|> }; return ODataTableDelegate; });<|fim▁end|>
}
<|file_name|>ax_hpo_iris.py<|end_file_name|><|fim▁begin|>import argparse import mlflow from ax.service.ax_client import AxClient from iris import IrisClassification from iris_data_module import IrisDataModule import pytorch_lightning as pl def train_evaluate(params, max_epochs=100): model = IrisClassification(**params) dm = IrisDataModule() dm.setup(stage="fit") trainer = pl.Trainer(max_epochs=max_epochs) mlflow.pytorch.autolog() trainer.fit(model, dm) trainer.test(datamodule=dm) test_accuracy = trainer.callback_metrics.get("test_acc") return test_accuracy def model_training_hyperparameter_tuning(max_epochs, total_trials, params): """ This function takes input params max_epochs, total_trials, params and creates a nested run in Mlflow. The parameters, metrics, model and summary are dumped into their respective mlflow-run ids. The best parameters are dumped along with the baseline model. :param max_epochs: Max epochs used for training the model. Type:int :param total_trials: Number of ax-client experimental trials. Type:int :param params: Model parameters. Type:dict """ with mlflow.start_run(run_name="Parent Run"): train_evaluate(params=params, max_epochs=max_epochs) ax_client = AxClient() ax_client.create_experiment( parameters=[ {"name": "lr", "type": "range", "bounds": [1e-3, 0.15], "log_scale": True}, {"name": "weight_decay", "type": "range", "bounds": [1e-4, 1e-3]}, {"name": "momentum", "type": "range", "bounds": [0.7, 1.0]}, ], objective_name="test_accuracy", ) for i in range(total_trials): with mlflow.start_run(nested=True, run_name="Trial " + str(i)) as child_run: parameters, trial_index = ax_client.get_next_trial() test_accuracy = train_evaluate(params=parameters, max_epochs=max_epochs) # completion of trial ax_client.complete_trial(trial_index=trial_index, raw_data=test_accuracy.item()) best_parameters, metrics = ax_client.get_best_parameters() for param_name, value in best_parameters.items(): mlflow.log_param("optimum_" + param_name, value)<|fim▁hole|> if __name__ == "__main__": parser = argparse.ArgumentParser() parser = pl.Trainer.add_argparse_args(parent_parser=parser) parser.add_argument( "--total_trials", default=3, help="Number of trials to be run for the optimization experiment", ) args = parser.parse_args() if "max_epochs" in args: max_epochs = args.max_epochs else: max_epochs = 100 params = {"lr": 0.1, "momentum": 0.9, "weight_decay": 0} model_training_hyperparameter_tuning( max_epochs=int(max_epochs), total_trials=int(args.total_trials), params=params )<|fim▁end|>
<|file_name|>ember-weakmap-polyfill.js<|end_file_name|><|fim▁begin|>/* globals Ember, require */ (function() { var _Ember; var id = 0; var dateKey = new Date().getTime(); if (typeof Ember !== 'undefined') { _Ember = Ember; } else { _Ember = require('ember').default; } function symbol() { return '__ember' + dateKey + id++; } function UNDEFINED() {} function FakeWeakMap(iterable) { this._id = symbol(); if (iterable === null || iterable === undefined) { return; } else if (Array.isArray(iterable)) { for (var i = 0; i < iterable.length; i++) { var key = iterable[i][0]; var value = iterable[i][1]; this.set(key, value); } } else { throw new TypeError('The weak map constructor polyfill only supports an array argument'); } } if (!_Ember.WeakMap) { var meta = _Ember.meta; var metaKey = symbol(); /* * @method get * @param key {Object} * @return {*} stored value */ FakeWeakMap.prototype.get = function(obj) { var metaInfo = meta(obj); var metaObject = metaInfo[metaKey]; if (metaInfo && metaObject) { if (metaObject[this._id] === UNDEFINED) { return undefined; } return metaObject[this._id]; } } /* * @method set * @param key {Object} * @param value {Any} * @return {Any} stored value */ FakeWeakMap.prototype.set = function(obj, value) { var type = typeof obj; if (!obj || (type !== 'object' && type !== 'function')) { throw new TypeError('Invalid value used as weak map key'); } var metaInfo = meta(obj); if (value === undefined) { value = UNDEFINED; } if (!metaInfo[metaKey]) { metaInfo[metaKey] = {}; } metaInfo[metaKey][this._id] = value; return this; } /* * @method has * @param key {Object} * @return {Boolean} if the key exists */ FakeWeakMap.prototype.has = function(obj) { var metaInfo = meta(obj); var metaObject = metaInfo[metaKey]; return (metaObject && metaObject[this._id] !== undefined); } /* * @method delete * @param key {Object} */ FakeWeakMap.prototype.delete = function(obj) {<|fim▁hole|> if (this.has(obj)) { delete metaInfo[metaKey][this._id]; return true; } return false; } if (typeof WeakMap === 'function' && typeof window !== 'undefined' && window.OVERRIDE_WEAKMAP !== true) { _Ember.WeakMap = WeakMap; } else { _Ember.WeakMap = FakeWeakMap; } } })();<|fim▁end|>
var metaInfo = meta(obj);
<|file_name|>add-test.js<|end_file_name|><|fim▁begin|>var add = require('./add'); QUnit.test('add example', function () { // add function returns sum of numbers<|fim▁hole|> // it also concatenates strings QUnit.equal(add('foo', 'bar'), 'foobar') });<|fim▁end|>
QUnit.equal(add(2, 3), 5);
<|file_name|>scormdropdownidevice_tmp.py<|end_file_name|><|fim▁begin|># =========================================================================== # eXe # Copyright 2004-2006, University of Auckland # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # =========================================================================== #This is a changed version of the clozeIdevice with selectboxes instead of input fields """ ScormDropDown Idevice. """ import logging from exe.engine.idevice import Idevice from exe.engine.path import Path<|fim▁hole|>from exe.engine.persist import Persistable from HTMLParser import HTMLParser from exe.engine.mimetex import compile from exe.engine.translate import lateTranslate from exe import globals as G #import re import Image from exe.engine.resource import Resource #from exe.engine.flvreader import FLVReader #from htmlentitydefs import name2codepoint #from exe.engine.htmlToText import HtmlToText #from twisted.persisted.styles import Versioned #from exe import globals as G #import os import re #import urllib #import shutil log = logging.getLogger(__name__) #============================================================================== # =========================================================================== # =========================================================================== class ScormDropDownIdevice(Idevice): """ Holds a paragraph with words missing that the student must fill in """ persistenceVersion = 4 def __init__(self, parentNode=None): """ Sets up the idevice title and instructions etc """ Idevice.__init__(self, x_(u"SCORM Test Dropdown"), x_(u"University of Auckland"), x_(u"<p>FillIn exercises are texts or " "sentences where students must fill in " "missing words. They are often used for the " "following purposes:</p>" "<ol>" "<li>To check knowledge of core course " "concepts (this could be a pre-check, " "formative exercise, or summative check).</li>" "<li>To check reading comprehension.</li>" "<li>To check vocabulary knowledge.</li>" "<li>To check word formation and/or grammatical " "competence. </li></ol>"), x_(u"<dl>" " <dt>If your goal is to test understanding " "of core concepts or reading comprehension" " </dt>" " <dd>" " <p>" " Write a summary of the concept or reading long " " enough to adequately test the target's " "knowledge, but short enough not to " "induce fatigue. Less than one typed page is " "probably adequate, but probably " "considerably less for young students or " "beginners." " </p>" " <p>" "Select words in the text that " "are key to understanding the concepts. These " "will probably be verbs, nouns, and key adverbs. " "Choose alternatives with one clear answer." " </p>" " </dd>" " <dt>" "If your goal is to test vocabulary knowledge" " </dt>" " <dd>" "<p>Write a text using the target vocabulary. This " "text should be coherent and cohesive, and be of " "an appropriate length. 
Highlight the target " "words in the text. Choose alternatives with one " "clear answer.</p>" " </dd>" " <dt>" "If your goal is to test word " "formation/grammar:" " </dt>" " <dd>" " <p>" "Write a text using the " "target forms. This text should be coherent and " "cohesive, and be of an appropriate length. " "Remember that the goal is not vocabulary " "knowledge, so the core meanings of the stem " "words should be well known to the students." " </p>" " <p>" "Highlight the target words in the text. Provide " "alternatives with the same word stem, but " "different affixes. It is a good idea to get a " "colleague to test the test/exercise to make " "sure there are no surprises!" " </p>" " </dd>" "</dl>"), u"question", parentNode) self.instructionsForLearners = TextAreaField( x_(u'Instructions'), x_(u"""Hier k&ouml;nnen Sie eine Aufgabenstellung eingeben oder die Standardanweisung &uuml;bernehmen. """), x_(u"""W&auml;hle im folgenden Abschnitt die richtigen Antworten aus!""")) self.instructionsForLearners.idevice = self self._content = ClozeField(x_(u'Cloze'), x_(u"""<p>Um eine L&uuml;cke mit Antwortm&ouml;glichkeiten zu erzeugen, schreiben sie zuerst die richtige Antwort und dann getrennt mit '|' die falschen Antworten, also folgenderma&szlig;en: richtig|falsch|falsch|falsch... Markieren Sie die gesamten Antworten und klicken sie auf den Button 'Wort verbergen/anzeigen'. Hinweise:<br>In Antworten k&ouml;nnen Leerzeichen enthalten sein<br>Das Zeichen '|' erhalten Sie, indem Sie die 'Alt Gr'-Taste gedr&uuml;ckt halten und dann auf die Taste mit dem Zeichen '|' tippen (auf deutschen Tastaturen meist neben dem 'Y'). </p>""")) self._content.idevice = self self.feedback = TextAreaField(x_(u'Feedback'), x_(u'Enter any feedback you wish to provide the learner ' 'with-in the feedback field. This field can be left blank.')) self.feedback.idevice = self self.emphasis = Idevice.SomeEmphasis self.systemResources += ["common.js"] self.isCloze = True # Properties content = property(lambda self: self._content, doc="Read only, use 'self.content.encodedContent = x' " "instead") def upgradeToVersion1(self): """ Upgrades exe to v0.10 """ self._upgradeIdeviceToVersion1() self.instructionsForLearners = TextAreaField( x_(u'Instructions For Learners'), x_(u'Put instructions for learners here'), x_(u'Read the paragraph below and ' 'fill in the missing words')) self.instructionsForLearners.idevice = self self.feedback = TextAreaField(x_(u'Feedback')) self.feedback.idevice = self def upgradeToVersion2(self): """ Upgrades exe to v0.11 """ self.content.autoCompletion = True self.content.autoCompletionInstruc = _(u"Allow auto completion when " u"user filling the gaps.") def upgradeToVersion3(self): """ Upgrades to v0.12 """ self._upgradeIdeviceToVersion2() self.systemResources += ["common.js"] def upgradeToVersion4(self): """ Upgrades to v0.20.3 """ self.isCloze = True # ===========================================================================<|fim▁end|>
from exe.engine.field import ClozeField, TextAreaField
<|file_name|>core.rs<|end_file_name|><|fim▁begin|>pub extern crate nalgebra as na; use std::fmt; use std::ops::{Add, Sub, Index}; use self::na::DMatrix; #[derive(Clone, Debug)] /// A coordinate point in degree, minute, second. pub struct Coordinate { pub deg: i8, pub min: i8, pub sec: f32, } impl Coordinate { /// Create a new coordinate point from its decimal representation. pub fn new(val: &f32) -> Coordinate { let deg = *val as i8; let min = (60. * (*val - deg as f32)) as i8; let sec = 3600. * ((*val - deg as f32) - (min as f32) / 60.); Coordinate { deg: deg, min: min, sec: sec, } } } impl From<Coordinate> for f32 { fn from(coord: Coordinate) -> Self { coord.deg as f32 + coord.min as f32 / 60. + coord.sec / 3600. } } impl fmt::Display for Coordinate { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{: >2}° {: >2}’ {: >6.2}”", self.deg, self.min, self.sec) } } impl Add for Coordinate { type Output = Coordinate; fn add(self, other: Coordinate) -> Coordinate { Coordinate { deg: self.deg + other.deg, min: self.min + other.min, sec: self.sec + other.sec, } } } impl Sub for Coordinate { type Output = Coordinate; fn sub(self, other: Coordinate) -> Coordinate { Coordinate { deg: self.deg - other.deg, min: self.min - other.min, sec: self.sec - other.sec, } } } #[derive(Clone, Debug)] /// A location on Earth, represented by its latitude and longitude. pub struct Location { pub lat: Coordinate, pub lon: Coordinate, } impl fmt::Display for Location { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Lat: {} // Lon: {}", self.lat, self.lon) } } /// The structure representing a Digital Elevation Model. #[derive(Debug)] pub struct Dem { /// The Digital Elevation Model data.<|fim▁hole|> /// Angular resolution of the Digital Elevation Model. pub res: Coordinate, /// Location of the first point of the Digital Elevation Model. pub loc: Location, /// Size of the Digital Elevation Model. pub size: (usize, usize), } pub type DemIndex = (usize, usize); impl Index<DemIndex> for Dem { type Output = i16; fn index<'a>(&'a self, _index: DemIndex) -> &'a i16 { &self.data[_index] } }<|fim▁end|>
pub data: DMatrix<i16>,