Dataset schema:
    prompt      large_string (lengths 70 to 991k)
    completion  large_string (lengths 0 to 1.02k)
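Each row that follows is a fill-in-the-middle (FIM) code-completion record: the prompt wraps a source file in <|file_name|>, <|fim▁begin|>, <|fim▁hole|>, and <|fim▁end|> markers, and the completion holds the text that fills the hole. Below is a minimal sketch of splitting a record back into prefix, suffix, and fill using the Hugging Face datasets library; the repository id is a hypothetical placeholder, and the sketch assumes each marker appears exactly once per prompt.

from datasets import load_dataset  # pip install datasets

# Hypothetical repository id; substitute the actual dataset path.
ds = load_dataset("example-org/fim-code-pairs", split="train")

def split_fim(prompt):
    # Assumes each special token occurs exactly once in the prompt.
    file_name = prompt.split("<|file_name|>")[1].split("<|end_file_name|>")[0]
    body = prompt.split("<|fim▁begin|>", 1)[1]
    prefix, rest = body.split("<|fim▁hole|>", 1)
    suffix = rest.split("<|fim▁end|>", 1)[0]
    return file_name, prefix, suffix

for row in ds.select(range(3)):
    name, prefix, suffix = split_fim(row["prompt"])
    # Re-insert the completion into the hole to reconstruct the file.
    restored = prefix + row["completion"] + suffix
    print(name, len(restored))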
<|file_name|>timer_timerfd.rs<|end_file_name|><|fim▁begin|>// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Timers based on timerfd_create(2) //! //! On OSes which support timerfd_create, we can use these much more accurate //! timers over select() + a timeout (see timer_other.rs). This strategy still //! employs a worker thread which does the waiting on the timer fds (to send //! messages away). //! //! The worker thread in this implementation uses epoll(7) to block. It //! maintains a working set of *all* native timers in the process, along with a //! pipe file descriptor used to communicate that there is data available on the //! incoming channel to the worker thread. Timers send requests to update their //! timerfd settings to the worker thread (see the comment above 'oneshot' for //! why). //! //! As with timer_other, timers just using sleep() do not use the timerfd at //! all. They remove the timerfd from the worker thread and then invoke //! nanosleep() to block the calling thread. //! //! As with timer_other, all units in this file are in units of millseconds. use std::comm::Data; use libc; use std::ptr; use std::os; use std::rt::rtio; use std::mem; use io::file::FileDesc; use io::IoResult; use io::timer_helper; pub struct Timer { fd: FileDesc, on_worker: bool, } #[allow(visible_private_types)] pub enum Req { NewTimer(libc::c_int, Sender<()>, bool, imp::itimerspec), RemoveTimer(libc::c_int, Sender<()>), Shutdown, } fn helper(input: libc::c_int, messages: Receiver<Req>) { let efd = unsafe { imp::epoll_create(10) }; let _fd1 = FileDesc::new(input, true); let _fd2 = FileDesc::new(efd, true); fn add(efd: libc::c_int, fd: libc::c_int) { let event = imp::epoll_event { events: imp::EPOLLIN as u32, data: fd as i64, }; let ret = unsafe { imp::epoll_ctl(efd, imp::EPOLL_CTL_ADD, fd, &event) }; assert_eq!(ret, 0); } fn del(efd: libc::c_int, fd: libc::c_int) { let event = imp::epoll_event { events: 0, data: 0 }; let ret = unsafe { imp::epoll_ctl(efd, imp::EPOLL_CTL_DEL, fd, &event) }; assert_eq!(ret, 0); } add(efd, input); let events: [imp::epoll_event, ..16] = unsafe { mem::init() }; let mut list: Vec<(libc::c_int, Sender<()>, bool)> = vec![]; 'outer: loop { let n = match unsafe { imp::epoll_wait(efd, events.as_ptr(), events.len() as libc::c_int, -1) } { 0 => fail!("epoll_wait returned immediately!"), -1 if os::errno() == libc::EINTR as int => { continue } -1 => fail!("epoll wait failed: {}", os::last_os_error()), n => n }; let mut incoming = false; for event in events.slice_to(n as uint).iter() { let fd = event.data as libc::c_int; if fd == input { let mut buf = [0, ..1]; // drain the input file descriptor of its input let _ = FileDesc::new(fd, false).inner_read(buf).unwrap(); incoming = true; } else { let mut bits = [0, ..8]; // drain the timerfd of how many times its fired // // FIXME: should this perform a send() this number of // times? 
let _ = FileDesc::new(fd, false).inner_read(bits).unwrap(); let (remove, i) = { match list.as_slice().bsearch(|&(f, _, _)| f.cmp(&fd)) { Some(i) => { let (_, ref c, oneshot) = *list.get(i); (!c.try_send(()) || oneshot, i) } None => fail!("fd not active: {}", fd), } }; if remove { drop(list.remove(i)); del(efd, fd); } } } while incoming { match messages.try_recv() { Data(NewTimer(fd, chan, one, timeval)) => { // acknowledge we have the new channel, we will never send // another message to the old channel chan.send(()); // If we haven't previously seen the file descriptor, then // we need to add it to the epoll set. match list.as_slice().bsearch(|&(f, _, _)| f.cmp(&fd)) { Some(i) => { drop(mem::replace(list.get_mut(i), (fd, chan, one))); } None => { match list.iter().position(|&(f, _, _)| f >= fd) { Some(i) => list.insert(i, (fd, chan, one)), None => list.push((fd, chan, one)), } add(efd, fd); } } // Update the timerfd's time value now that we have control // of the timerfd let ret = unsafe { imp::timerfd_settime(fd, 0, &timeval, ptr::null()) }; assert_eq!(ret, 0); } Data(RemoveTimer(fd, chan)) => { match list.as_slice().bsearch(|&(f, _, _)| f.cmp(&fd)) { Some(i) => { drop(list.remove(i)); del(efd, fd); } None => {} } chan.send(()); } Data(Shutdown) => { assert!(list.len() == 0); break 'outer; } _ => break, } } } } impl Timer { pub fn new() -> IoResult<Timer> { timer_helper::boot(helper); match unsafe { imp::timerfd_create(imp::CLOCK_MONOTONIC, 0) } { -1 => Err(super::last_error()), n => Ok(Timer { fd: FileDesc::new(n, true), on_worker: false, }), } } pub fn sleep(ms: u64) { let mut to_sleep = libc::timespec { tv_sec: (ms / 1000) as libc::time_t, tv_nsec: ((ms % 1000) * 1000000) as libc::c_long, }; while unsafe { libc::nanosleep(&to_sleep, &mut to_sleep) } != 0 { if os::errno() as int != libc::EINTR as int { fail!("failed to sleep, but not because of EINTR?"); } } } fn remove(&mut self) { if !self.on_worker { return } let (tx, rx) = channel(); timer_helper::send(RemoveTimer(self.fd.fd(), tx)); rx.recv(); self.on_worker = false; } } impl rtio::RtioTimer for Timer { fn sleep(&mut self, msecs: u64) { self.remove(); Timer::sleep(msecs); } // Periodic and oneshot channels are updated by updating the settings on the // corresopnding timerfd. The update is not performed on the thread calling // oneshot or period, but rather the helper epoll thread. The reason for // this is to avoid losing messages and avoid leaking messages across ports. // // By updating the timerfd on the helper thread, we're guaranteed that all // messages for a particular setting of the timer will be received by the // new channel/port pair rather than leaking old messages onto the new port // or leaking new messages onto the old port. // // We also wait for the remote thread to actually receive the new settings // before returning to guarantee the invariant that when oneshot() and // period() return that the old port will never receive any more messages. 
fn oneshot(&mut self, msecs: u64) -> Receiver<()> { let (tx, rx) = channel(); let new_value = imp::itimerspec { it_interval: imp::timespec { tv_sec: 0, tv_nsec: 0 }, it_value: imp::timespec { tv_sec: (msecs / 1000) as libc::time_t, tv_nsec: ((msecs % 1000) * 1000000) as libc::c_long, } }; timer_helper::send(NewTimer(self.fd.fd(), tx, true, new_value)); rx.recv(); self.on_worker = true; return rx; } fn period(&mut self, msecs: u64) -> Receiver<()> { let (tx, rx) = channel(); let spec = imp::timespec { tv_sec: (msecs / 1000) as libc::time_t, tv_nsec: ((msecs % 1000) * 1000000) as libc::c_long, }; let new_value = imp::itimerspec { it_interval: spec, it_value: spec, }; timer_helper::send(NewTimer(self.fd.fd(), tx, false, new_value)); rx.recv(); self.on_worker = true; return rx; } } impl Drop for Timer {<|fim▁hole|> fn drop(&mut self) { // When the timerfd file descriptor is closed, it will be automatically // removed from the epoll set of the worker thread, but we want to make // sure that the associated channel is also removed from the worker's // hash map. self.remove(); } } #[allow(dead_code)] mod imp { use libc; pub static CLOCK_MONOTONIC: libc::c_int = 1; pub static EPOLL_CTL_ADD: libc::c_int = 1; pub static EPOLL_CTL_DEL: libc::c_int = 2; pub static EPOLL_CTL_MOD: libc::c_int = 3; pub static EPOLLIN: libc::c_int = 0x001; pub static EPOLLOUT: libc::c_int = 0x004; pub static EPOLLPRI: libc::c_int = 0x002; pub static EPOLLERR: libc::c_int = 0x008; pub static EPOLLRDHUP: libc::c_int = 0x2000; pub static EPOLLET: libc::c_int = 1 << 31; pub static EPOLLHUP: libc::c_int = 0x010; pub static EPOLLONESHOT: libc::c_int = 1 << 30; #[cfg(target_arch = "x86_64")] #[packed] pub struct epoll_event { pub events: u32, pub data: i64, } #[cfg(not(target_arch = "x86_64"))] pub struct epoll_event { pub events: u32, pub data: i64, } pub struct timespec { pub tv_sec: libc::time_t, pub tv_nsec: libc::c_long, } pub struct itimerspec { pub it_interval: timespec, pub it_value: timespec, } extern { pub fn timerfd_create(clockid: libc::c_int, flags: libc::c_int) -> libc::c_int; pub fn timerfd_settime(fd: libc::c_int, flags: libc::c_int, new_value: *itimerspec, old_value: *itimerspec) -> libc::c_int; pub fn timerfd_gettime(fd: libc::c_int, curr_value: *itimerspec) -> libc::c_int; pub fn epoll_create(size: libc::c_int) -> libc::c_int; pub fn epoll_ctl(epfd: libc::c_int, op: libc::c_int, fd: libc::c_int, event: *epoll_event) -> libc::c_int; pub fn epoll_wait(epfd: libc::c_int, events: *epoll_event, maxevents: libc::c_int, timeout: libc::c_int) -> libc::c_int; } }<|fim▁end|>
<|file_name|>comments.rs<|end_file_name|><|fim▁begin|>// rustfmt-emit_mode: coverage

/// Here's a doc comment!
fn main() {<|fim▁hole|>
    // foo is bar
    let foo = "bar";
    // loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong comment!!!!!
}<|fim▁end|>
<|file_name|>tokens.py<|end_file_name|><|fim▁begin|># Copyright (C) 2014-2016 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2016 Jesús Espino <[email protected]>
# Copyright (C) 2014-2016 David Barragán <[email protected]>
# Copyright (C) 2014-2016 Alejandro Alonso <[email protected]>
# This program is free software: you can redistribute it and/or modify<|fim▁hole|># it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from taiga.base import exceptions as exc

from django.apps import apps
from django.core import signing
from django.utils.translation import ugettext as _


def get_token_for_user(user, scope):
    """
    Generate a new signed token containing a specified user,
    limited to a scope (identified as a string).
    """
    data = {"user_%s_id" % (scope): user.id}
    return signing.dumps(data)


def get_user_for_token(token, scope, max_age=None):
    """
    Given a self-contained token and a scope, try to parse and unsign it.
    If max_age is specified, it checks token expiration.

    If the token passes validation, returns a user instance corresponding
    to the user_id stored in the incoming token.
    """
    try:
        data = signing.loads(token, max_age=max_age)
    except signing.BadSignature:
        raise exc.NotAuthenticated(_("Invalid token"))

    model_cls = apps.get_model("users", "User")

    try:
        user = model_cls.objects.get(pk=data["user_%s_id" % (scope)])
    except (model_cls.DoesNotExist, KeyError):
        raise exc.NotAuthenticated(_("Invalid token"))
    else:
        return user<|fim▁end|>
<|file_name|>TemplateImplUtil.java<|end_file_name|><|fim▁begin|>/* * Copyright 2000-2015 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.codeInsight.template.impl; import com.intellij.psi.tree.IElementType; import com.intellij.util.containers.hash.LinkedHashMap; /** * @author Maxim.Mossienko */<|fim▁hole|> LinkedHashMap<String, Variable> variables = new LinkedHashMap<String, Variable>(); TemplateTextLexer lexer = new TemplateTextLexer(); lexer.start(text); while (true) { IElementType tokenType = lexer.getTokenType(); if (tokenType == null) break; int start = lexer.getTokenStart(); int end = lexer.getTokenEnd(); String token = text.subSequence(start, end).toString(); if (tokenType == TemplateTokenType.VARIABLE) { String name = token.substring(1, token.length() - 1); if (!variables.containsKey(name)) { variables.put(name, new Variable(name, "", "", true)); } } lexer.advance(); } return variables; } public static boolean isValidVariableName(String varName) { return parseVariables("$" + varName + "$").containsKey(varName); } }<|fim▁end|>
public class TemplateImplUtil {
  public static LinkedHashMap<String, Variable> parseVariables(CharSequence text) {
<|file_name|>cluster_provider.py<|end_file_name|><|fim▁begin|>import logging from abc import abstractmethod from string import Template from parsl.providers.error import SchedulerMissingArgs, ScriptPathError from parsl.launchers.error import BadLauncher from parsl.providers.provider_base import ExecutionProvider logger = logging.getLogger(__name__) class ClusterProvider(ExecutionProvider): """ This class defines behavior common to all cluster/supercompute-style scheduler systems. Parameters ---------- label : str Label for this provider. channel : Channel Channel for accessing this provider. Possible channels include :class:`~parsl.channels.LocalChannel` (the default), :class:`~parsl.channels.SSHChannel`, or :class:`~parsl.channels.SSHInteractiveLoginChannel`. walltime : str Walltime requested per block in HH:MM:SS. launcher : Launcher Launcher for this provider. cmd_timeout : int Timeout for commands made to the scheduler in seconds .. code:: python +------------------ | script_string ------->| submit id <--------|---+ | [ ids ] ------->| status [statuses] <--------|----+ | [ ids ] ------->| cancel [cancel] <--------|----+ | +------------------- """ def __init__(self, label, channel, nodes_per_block, init_blocks, min_blocks, max_blocks, parallelism, walltime,<|fim▁hole|> cmd_timeout=10): self._label = label self.channel = channel self.nodes_per_block = nodes_per_block self.init_blocks = init_blocks self.min_blocks = min_blocks self.max_blocks = max_blocks self.parallelism = parallelism self.launcher = launcher self.walltime = walltime self.cmd_timeout = cmd_timeout if not callable(self.launcher): raise(BadLauncher(self.launcher, "Launcher for executor: {} is of type: {}. Expects a parsl.launcher.launcher.Launcher or callable".format( label, type(self.launcher)))) self.script_dir = None # Dictionary that keeps track of jobs, keyed on job_id self.resources = {} def execute_wait(self, cmd, timeout=None): t = self.cmd_timeout if timeout is not None: t = timeout return self.channel.execute_wait(cmd, t) def _write_submit_script(self, template, script_filename, job_name, configs): """Generate submit script and write it to a file. Args: - template (string) : The template string to be used for the writing submit script - script_filename (string) : Name of the submit script - job_name (string) : job name - configs (dict) : configs that get pushed into the template Returns: - True: on success Raises: SchedulerMissingArgs : If template is missing args ScriptPathError : Unable to write submit script out """ try: submit_script = Template(template).substitute(jobname=job_name, **configs) # submit_script = Template(template).safe_substitute(jobname=job_name, **configs) with open(script_filename, 'w') as f: f.write(submit_script) except KeyError as e: logger.error("Missing keys for submit script : %s", e) raise (SchedulerMissingArgs(e.args, self.label)) except IOError as e: logger.error("Failed writing to submit script: %s", script_filename) raise (ScriptPathError(script_filename, e)) except Exception as e: print("Template : ", template) print("Args : ", job_name) print("Kwargs : ", configs) logger.error("Uncategorized error: %s", e) raise (e) return True @abstractmethod def _status(self): pass def status(self, job_ids): """ Get the status of a list of jobs identified by the job identifiers returned from the submit request. Args: - job_ids (list) : A list of job identifiers Returns: - A list of JobStatus objects corresponding to each job_id in the job_ids list. 
Raises: - ExecutionProviderException or its subclasses """ if job_ids: self._status() return [self.resources[jid]['status'] for jid in job_ids] @property def label(self): return self._label<|fim▁end|>
launcher,
<|file_name|>fo.js<|end_file_name|><|fim▁begin|>/* Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved. For licensing, see LICENSE.md or http://ckeditor.com/license */ CKEDITOR.plugins.setLang( 'flash', 'fo', { <|fim▁hole|> accessNever: 'Ongantíð', accessSameDomain: 'Sama navnaøki', alignAbsBottom: 'Abs botnur', alignAbsMiddle: 'Abs miðja', alignBaseline: 'Basislinja', alignTextTop: 'Tekst toppur', bgcolor: 'Bakgrundslitur', chkFull: 'Loyv fullan skerm', chkLoop: 'Endurspæl', chkMenu: 'Ger Flash skrá virkna', chkPlay: 'Avspælingin byrjar sjálv', flashvars: 'Variablar fyri Flash', hSpace: 'Høgri breddi', properties: 'Flash eginleikar', propertiesTab: 'Eginleikar', quality: 'Góðska', qualityAutoHigh: 'Auto høg', qualityAutoLow: 'Auto Lág', qualityBest: 'Besta', qualityHigh: 'Høg', qualityLow: 'Lág', qualityMedium: 'Meðal', scale: 'Skalering', scaleAll: 'Vís alt', scaleFit: 'Neyv skalering', scaleNoBorder: 'Eingin bordi', title: 'Flash eginleikar', vSpace: 'Vinstri breddi', validateHSpace: 'HSpace má vera eitt tal.', validateSrc: 'Vinarliga skriva tilknýti (URL)', validateVSpace: 'VSpace má vera eitt tal.', windowMode: 'Slag av rúti', windowModeOpaque: 'Ikki transparent', windowModeTransparent: 'Transparent', windowModeWindow: 'Rútur' });<|fim▁end|>
access: 'Script atgongd', accessAlways: 'Altíð',
<|file_name|>getDeclarationTypeText.spec.ts<|end_file_name|><|fim▁begin|>import { __String, SignatureDeclaration } from 'typescript'; import { TsParser } from '.'; import { getDeclarationTypeText } from './getDeclarationTypeText'; const path = require('canonical-path'); describe('getDeclarationTypeText', () => { let parser: TsParser; let basePath: string; beforeEach(() => { parser = new TsParser(require('dgeni/lib/mocks/log')(false)); basePath = path.resolve(__dirname, '../../mocks'); }); <|fim▁hole|> expect(getDeclarationTypeText(getExport('testConst').getDeclarations()![0])).toEqual('42'); const testFunction = getExport('testFunction').getDeclarations()![0] as SignatureDeclaration; expect(getDeclarationTypeText(testFunction)).toEqual('number'); expect(getDeclarationTypeText(testFunction.parameters[0])).toEqual('T[]'); expect(getDeclarationTypeText(testFunction.typeParameters![0])).toEqual('T'); const testClass = getExport('TestClass'); const testClassDeclaration = testClass.getDeclarations()![0] as SignatureDeclaration; expect(getDeclarationTypeText(testClass.members!.get('prop1' as __String)!.getDeclarations()![0])).toEqual('T[]'); expect(getDeclarationTypeText(testClass.members!.get('prop2' as __String)!.getDeclarations()![0])).toEqual('OtherClass<T>'); expect(getDeclarationTypeText(testClass.members!.get('prop3' as __String)!.getDeclarations()![0])).toEqual('OtherClass<T, T>'); expect(getDeclarationTypeText(testClass.members!.get('method'as __String)!.getDeclarations()![0])).toEqual('T'); expect(getDeclarationTypeText(testClassDeclaration.typeParameters![0])).toEqual('T = any'); function getExport(name: string) { return moduleExports.find(e => e.name === name)!; } }); });<|fim▁end|>
it('should return a textual representation of the type the declaration', () => { const parseInfo = parser.parse(['tsParser/getDeclarationTypeText.test.ts'], basePath); const moduleExports = parseInfo.moduleSymbols[0].exportArray;
<|file_name|>route_stitching.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Copyright (C) 2020 The SymbiFlow Authors. # # Use of this source code is governed by a ISC-style # license that can be found in the LICENSE file or at # https://opensource.org/licenses/ISC # # SPDX-License-Identifier: ISC """ This file defines the RoutingTree class which can be used for constructing routing trees for route segments from the fpga_interchange.physical_netlist class PhysicalBelPin/PhysicalSitePin/PhysicalSitePip/PhysicalPip. Use of the RoutingTree requires having the DeviceResources class loaded for the relevant part for the design. Use interchange_capnp.Interchange.read_device_resources to load a device resource file. """ def create_id_map(id_to_segment, segments): """ Create or update dict from object ids of segments to segments. """ for segment in segments: segment_id = id(segment) assert segment_id not in id_to_segment id_to_segment[segment_id] = segment create_id_map(id_to_segment, segment.branches) def check_tree(routing_tree, segment): """ Recursively checks a routing tree. Checks for: - Circular routing trees - Child segments are connected to their parents. """ # Check for circular routing tree for _ in yield_branches(segment): pass # Ensure children are connected to parent. root_resource = routing_tree.get_device_resource(segment) for child in segment.branches: child_resource = routing_tree.get_device_resource(child) assert root_resource.is_connected(child_resource), (str(segment), str(child), root_resource, child_resource) check_tree(routing_tree, child) def yield_branches(routing_branch): """ Yield all routing branches starting from the given route segment. This will yield the input route branch in addition to its children. An AssertionError will be raised for a circular route is detected. """ objs = set() def descend(obj): obj_id = id(obj) assert obj_id not in objs objs.add(obj_id) yield obj for seg in obj.branches: for s in descend(seg): yield s for s in descend(routing_branch): yield s def sort_branches(branches): """ Sort branches by the branch tuple. The branch tuple is: ('bel_pin'/'site_pin'/'site_pip'/'pip', <site>/<tile>, ...) so sorting in this way ensures that BEL pins are grouped, etc. This also canonicalize the branch order, which makes comparing trees each, just normalize both trees, and compare the result. """ branches.sort(key=lambda item: item.to_tuple()) def get_tuple_tree(root_branch): """ Convert a rout branch in a two tuple. """ return root_branch.to_tuple(), tuple( get_tuple_tree(branch) for branch in root_branch.branches) class RoutingTree(): """ Utility class for managing stitching of a routing tree. """ def __init__(self, device_resources, site_types, stubs, sources): # Check that no duplicate routing resources are present. tuple_to_id = {} for stub in stubs: for branch in yield_branches(stub): tup = branch.to_tuple() assert tup not in tuple_to_id, tup tuple_to_id[tup] = id(branch) for source in sources: for branch in yield_branches(source): tup = branch.to_tuple() assert tup not in tuple_to_id, tup tuple_to_id[tup] = id(branch) self.id_to_segment = {} self.id_to_device_resource = {} self.stubs = stubs self.sources = sources self.connections = None # Populate id_to_segment and id_to_device_resource maps. 
create_id_map(self.id_to_segment, self.stubs) create_id_map(self.id_to_segment, self.sources) for segment_id, segment in self.id_to_segment.items(): self.id_to_device_resource[ segment_id] = segment.get_device_resource( site_types, device_resources) # Verify initial input makes sense. self.check_trees() def segment_for_id(self, segment_id): """ Get routing segment based on the object id of the routing segment. """ return self.id_to_segment[segment_id] <|fim▁hole|> """ Normalize the routing tree by sorted element. """ sort_branches(self.stubs) sort_branches(self.sources) for stub in self.stubs: for branch in yield_branches(stub): sort_branches(branch.branches) for source in self.sources: for branch in yield_branches(source): sort_branches(branch.branches) def get_tuple_tree(self): """ Get tuple tree representation of the current routing tree. This is suitable for equality checking if normalized with normalize_tree. """ return (tuple(get_tuple_tree(stub) for stub in self.stubs), tuple(get_tuple_tree(source) for source in self.sources)) def get_device_resource_for_id(self, segment_id): """ Get the device resource that corresponds to the segment id given. """ return self.id_to_device_resource[segment_id] def get_device_resource(self, segment): """ Get the device resource that corresponds to the segment given. """ return self.id_to_device_resource[id(segment)] def check_trees(self): """ Check that the routing tree at and below obj is valid. This method should be called after all route segments have been added to the node cache. """ for stub in self.stubs: check_tree(self, stub) for source in self.sources: assert self.get_device_resource(source).is_root(), source check_tree(self, source) def connections_for_segment_id(self, segment_id): """ Yield all connection resources connected to segment id given. """ resource = self.id_to_device_resource[segment_id] for site_wire in resource.site_wires(): yield site_wire for node in resource.nodes(): yield node def build_connections(self): """ Create a dictionary of connection resources to segment ids. """ self.connections = {} for segment_id in self.id_to_segment.keys(): for connection in self.connections_for_segment_id(segment_id): if connection not in self.connections: self.connections[connection] = set() self.connections[connection].add(segment_id) def get_connection(self, connection_resource): """ Get list of segment ids connected to connection_resource. """ if self.connections is None: self.build_connections() return self.connections[connection_resource] def reroot(self): """ Determine which routing segments are roots and non-roots. Repopulates stubs and sources list with new roots and non-root segments. """ if self.connections is None: self.build_connections() segments = self.stubs + self.sources self.stubs.clear() self.sources.clear() source_segment_ids = set() # Example each connection and find the best root. for segment_ids in self.connections.values(): root_priority = None root = None root_count = 0 for segment_id in segment_ids: resource = self.get_device_resource_for_id(segment_id) if resource.is_root(): possible_root_priority = resource.root_priority() if root is None: root_priority = possible_root_priority root = segment_id root_count = 1 elif possible_root_priority < root_priority: root_priority = possible_root_priority root = segment_id root_count = 1 elif possible_root_priority == root_priority: root_count += 1 if root is not None: # Generate an error if multiple segments could be a root. # This should only occur near IO pads. 
In most cases, the # root should be the only Direction.Output BEL pin on the site # wire. assert root_count == 1 source_segment_ids.add(root) for segment in segments: if id(segment) in source_segment_ids: self.sources.append(segment) else: self.stubs.append(segment) def attach(self, parent_id, child_id): """ Attach a child routing tree to the routing tree for parent. """ assert self.id_to_device_resource[parent_id].is_connected( self.id_to_device_resource[child_id]) self.id_to_segment[parent_id].branches.append( self.id_to_segment[child_id]) def check_count(self): """ Verify that every segment is reachable from stubs and sources list. This check ensures no routing segment is orphaned during processing. """ count = 0 for stub in self.stubs: for _ in yield_branches(stub): count += 1 for source in self.sources: for _ in yield_branches(source): count += 1 assert len(self.id_to_segment) == count def attach_candidates(routing_tree, id_to_idx, stitched_stubs, objs_to_attach, route_branch, visited): """ Attach children of branches in the routing tree route_branch. routing_tree : RoutingTree A node cache that contains all routing branches in the net. id_to_idx : dict object id to int Map of object id to idx in a list of unstitched routing branches. stitched_stubs : set of int Set of indicies of stubs that have been stitched. Used to track which stubs have been stitched into the tree, and verify stubs are not stitched twice into the tree. objs_to_attach : list of parent object id to child object id When attach_candidates finds a stub that should be stitched into the routing tree, rather than stitch it immediately, it adds a parent of (id(parent), id(child)) to objs_to_attach. This deferal enables the traversal of the input routing tree without modification. After attach_candidates returns, elements of objs_to_attach should be passed to routing_tree.attach to join the trees. obj : PhysicalBelPin/PhysicalSitePin/PhysicalSitePip/PhysicalPip Root of routing tree to iterate over to identify candidates to attach to routing tree.. visited : set of ids to routing branches. """ root_obj_id = id(route_branch) assert root_obj_id not in id_to_idx for branch in yield_branches(route_branch): # Make sure each route branch is only visited once. assert id(branch) not in visited visited.add(id(branch)) for connection in routing_tree.connections_for_segment_id(id(branch)): for segment_id in routing_tree.get_connection(connection): if id(branch) == segment_id: continue if segment_id not in id_to_idx: continue # There should never be a loop because root_obj_id should not # be in the id_to_idx map once it is stitched into another tree. assert root_obj_id != segment_id if not routing_tree.get_device_resource(branch).is_connected( routing_tree.get_device_resource_for_id(segment_id)): continue idx = id_to_idx[segment_id] if idx in stitched_stubs: assert segment_id in objs_to_attach proposed_parent = id(branch) old_parent = objs_to_attach[segment_id] assert old_parent == proposed_parent, ( str(routing_tree.segment_for_id(proposed_parent)), str(routing_tree.segment_for_id(old_parent)), str(routing_tree.segment_for_id(segment_id))) else: stitched_stubs.add(idx) objs_to_attach[segment_id] = id(branch) def attach_from_parents(routing_tree, id_to_idx, parents, visited): """ Attach children routing tree starting from list of parent routing trees. routing_tree : RoutingTree A node cache that contains all routing branches in the net. id_to_idx : dict object id to int Map of object id to idx in a list of unstitched routing branches. 
parents : list of PhysicalBelPin/PhysicalSitePin/PhysicalSitePip/PhysicalPip Roots of routing tree to search for children trees. visited : set of ids to routing branches. Returns set of indicies to stitched stubs. """ objs_to_attach = {} stitched_stubs = set() for parent in parents: attach_candidates( routing_tree=routing_tree, id_to_idx=id_to_idx, stitched_stubs=stitched_stubs, objs_to_attach=objs_to_attach, route_branch=parent, visited=visited) for child_id, branch_id in objs_to_attach.items(): # The branch_id should not be in the id_to_idx map, because it should # be an outstanding stub. assert branch_id not in id_to_idx # The child_id should be in the id_to_idx map, because it should be an # outstanding stub. assert child_id in id_to_idx routing_tree.attach(branch_id, child_id) stitched_stubs.add(id_to_idx[child_id]) del id_to_idx[child_id] # Return the newly stitched stubs, so that they form the new parent list. return stitched_stubs def stitch_segments(device_resources, site_types, segments): """ Stitch segments of the routing tree into trees rooted from net sources. """ routing_tree = RoutingTree( device_resources, site_types, stubs=segments, sources=[]) routing_tree.reroot() # Create a id to idx map so that stitching can be deferred when walking # trees id_to_idx = {} for idx, stub in enumerate(routing_tree.stubs): assert idx not in id_to_idx id_to_idx[id(stub)] = idx # Initial set of tree parents are just the sources parents = routing_tree.sources stitched_stubs = set() # Track visited nodes, as it is expected to never visit a route branch # more than once. visited = set() # Continue iterating until no more stubs are stitched. while len(parents) > 0: # Starting from the parents of the current tree, add stubs the # descend from this set, and create a new set of parents from those # stubs. newly_stitched_stubs = attach_from_parents(routing_tree, id_to_idx, parents, visited) # Mark the newly stitched stubs to be removed. stitched_stubs |= newly_stitched_stubs # New set of parents using from the newly stitched stubs. parents = [routing_tree.stubs[idx] for idx in newly_stitched_stubs] # Remove stitched stubs from stub list for idx in sorted(stitched_stubs, reverse=True): del routing_tree.stubs[idx] # Make sure new trees are sensible. routing_tree.check_trees() routing_tree.check_count() return routing_tree.sources, routing_tree.stubs def flatten_segments(segments): """ Take a list of routing segments and flatten out any children. """ output = [] for segment in segments: for branch in yield_branches(segment): output.append(branch) for segment in output: segment.branches.clear() return output<|fim▁end|>
def normalize_tree(self):
<|file_name|>views.py<|end_file_name|><|fim▁begin|># -*- encoding: utf-8 -*-

__author__ = 'ray'
__date__ = '2/27/15'

from flask import jsonify, abort
from flask.views import MethodView

from ..models import ThemeModel


<|fim▁hole|>class ThemeView(MethodView):
    """Theme View

    Retrieve description of a list of available themes.

    :param theme_model: A theme model that manages themes.
    :type theme_model: :class:`~stonemason.service.models.ThemeModel`
    """

    def __init__(self, theme_model):
        assert isinstance(theme_model, ThemeModel)
        self._theme_model = theme_model

    def get(self, tag):
        """Return description of the theme. Raise :http:statuscode:`404`
        if not found.

        :param tag: Name of a theme.
        :type tag: str
        """
        if tag is None:
            collection = list()
            for theme in self._theme_model.iter_themes():
                collection.append(theme.to_dict())
            return jsonify(result=collection)
        else:
            theme = self._theme_model.get_theme(tag)
            if theme is None:
                abort(404)
            return jsonify(result=theme.to_dict())<|fim▁end|>
<|file_name|>windowing.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ //! Abstract windowing methods. The concrete implementations of these can be found in `platform/`. use canvas::{SurfaceProviders, WebGlExecutor}; use embedder_traits::EventLoopWaker; use euclid::Scale; #[cfg(feature = "gl")] use gleam::gl; use keyboard_types::KeyboardEvent; use msg::constellation_msg::{PipelineId, TopLevelBrowsingContextId, TraversalDirection}; use script_traits::{MediaSessionActionType, MouseButton, TouchEventType, TouchId, WheelDelta}; use servo_geometry::DeviceIndependentPixel; use servo_media::player::context::{GlApi, GlContext, NativeDisplay}; use servo_url::ServoUrl; use std::fmt::{Debug, Error, Formatter}; #[cfg(feature = "gl")] use std::rc::Rc; use std::time::Duration; use style_traits::DevicePixel; use rust_webvr::VRServiceManager; use webrender_api::units::DevicePoint; use webrender_api::units::{DeviceIntPoint, DeviceIntRect, DeviceIntSize}; use webrender_api::ScrollLocation; use webvr_traits::WebVRMainThreadHeartbeat; #[derive(Clone)] pub enum MouseWindowEvent { Click(MouseButton, DevicePoint), MouseDown(MouseButton, DevicePoint), MouseUp(MouseButton, DevicePoint), } /// Various debug and profiling flags that WebRender supports. #[derive(Clone)] pub enum WebRenderDebugOption { Profiler, TextureCacheDebug, RenderTargetDebug, } /// Events that the windowing system sends to Servo. #[derive(Clone)] pub enum WindowEvent { /// Sent when no message has arrived, but the event loop was kicked for some reason (perhaps /// by another Servo subsystem). /// /// FIXME(pcwalton): This is kind of ugly and may not work well with multiprocess Servo. /// It's possible that this should be something like /// `CompositorMessageWindowEvent(compositor_thread::Msg)` instead. Idle, /// Sent when part of the window is marked dirty and needs to be redrawn. Before sending this /// message, the window must make the same GL context as in `PrepareRenderingEvent` current. Refresh, /// Sent when the window is resized. Resize, /// Sent when a navigation request from script is allowed/refused. AllowNavigationResponse(PipelineId, bool), /// Sent when a new URL is to be loaded. LoadUrl(TopLevelBrowsingContextId, ServoUrl), /// Sent when a mouse hit test is to be performed. MouseWindowEventClass(MouseWindowEvent), /// Sent when a mouse move. MouseWindowMoveEventClass(DevicePoint), /// Touch event: type, identifier, point Touch(TouchEventType, TouchId, DevicePoint), /// Sent when user moves the mouse wheel. Wheel(WheelDelta, DevicePoint), /// Sent when the user scrolls. The first point is the delta and the second point is the /// origin. Scroll(ScrollLocation, DeviceIntPoint, TouchEventType), /// Sent when the user zooms. Zoom(f32), /// Simulated "pinch zoom" gesture for non-touch platforms (e.g. ctrl-scrollwheel). PinchZoom(f32), /// Sent when the user resets zoom to default. ResetZoom, /// Sent when the user uses chrome navigation (i.e. backspace or shift-backspace). Navigation(TopLevelBrowsingContextId, TraversalDirection), /// Sent when the user quits the application Quit, /// Sent when the user exits from fullscreen mode ExitFullScreen(TopLevelBrowsingContextId), /// Sent when a key input state changes Keyboard(KeyboardEvent), /// Sent when Ctr+R/Apple+R is called to reload the current page. 
Reload(TopLevelBrowsingContextId), /// Create a new top level browsing context NewBrowser(ServoUrl, TopLevelBrowsingContextId), /// Close a top level browsing context CloseBrowser(TopLevelBrowsingContextId), /// Panic a top level browsing context. SendError(Option<TopLevelBrowsingContextId>, String), /// Make a top level browsing context visible, hiding the previous /// visible one. SelectBrowser(TopLevelBrowsingContextId), /// Toggles a debug flag in WebRender ToggleWebRenderDebug(WebRenderDebugOption), /// Capture current WebRender CaptureWebRender, /// Toggle sampling profiler with the given sampling rate and max duration. ToggleSamplingProfiler(Duration, Duration), /// Sent when the user triggers a media action through the UA exposed media UI /// (play, pause, seek, etc.). MediaSessionAction(MediaSessionActionType), /// Set browser visibility. A hidden browser will not tick the animations. ChangeBrowserVisibility(TopLevelBrowsingContextId, bool), } impl Debug for WindowEvent { fn fmt(&self, f: &mut Formatter) -> Result<(), Error> { match *self { WindowEvent::Idle => write!(f, "Idle"), WindowEvent::Refresh => write!(f, "Refresh"), WindowEvent::Resize => write!(f, "Resize"), WindowEvent::Keyboard(..) => write!(f, "Keyboard"), WindowEvent::AllowNavigationResponse(..) => write!(f, "AllowNavigationResponse"), WindowEvent::LoadUrl(..) => write!(f, "LoadUrl"), WindowEvent::MouseWindowEventClass(..) => write!(f, "Mouse"), WindowEvent::MouseWindowMoveEventClass(..) => write!(f, "MouseMove"), WindowEvent::Touch(..) => write!(f, "Touch"), WindowEvent::Wheel(..) => write!(f, "Wheel"), WindowEvent::Scroll(..) => write!(f, "Scroll"), WindowEvent::Zoom(..) => write!(f, "Zoom"), WindowEvent::PinchZoom(..) => write!(f, "PinchZoom"), WindowEvent::ResetZoom => write!(f, "ResetZoom"), WindowEvent::Navigation(..) => write!(f, "Navigation"), WindowEvent::Quit => write!(f, "Quit"), WindowEvent::Reload(..) => write!(f, "Reload"), WindowEvent::NewBrowser(..) => write!(f, "NewBrowser"), WindowEvent::SendError(..) => write!(f, "SendError"), WindowEvent::CloseBrowser(..) => write!(f, "CloseBrowser"), WindowEvent::SelectBrowser(..) => write!(f, "SelectBrowser"), WindowEvent::ToggleWebRenderDebug(..) => write!(f, "ToggleWebRenderDebug"), WindowEvent::CaptureWebRender => write!(f, "CaptureWebRender"), WindowEvent::ToggleSamplingProfiler(..) => write!(f, "ToggleSamplingProfiler"), WindowEvent::ExitFullScreen(..) => write!(f, "ExitFullScreen"), WindowEvent::MediaSessionAction(..) => write!(f, "MediaSessionAction"), WindowEvent::ChangeBrowserVisibility(..) => write!(f, "ChangeBrowserVisibility"), } } } #[derive(Clone, Copy, Debug, PartialEq)] pub enum AnimationState { Idle, Animating, } pub trait WindowMethods { /// Presents the window to the screen (perhaps by page flipping). fn present(&self);<|fim▁hole|> /// Return the GL function pointer trait. #[cfg(feature = "gl")] fn gl(&self) -> Rc<dyn gl::Gl>; /// Get the coordinates of the native window, the screen and the framebuffer. fn get_coordinates(&self) -> EmbedderCoordinates; /// Set whether the application is currently animating. /// Typically, when animations are active, the window /// will want to avoid blocking on UI events, and just /// run the event loop at the vsync interval. 
fn set_animation_state(&self, _state: AnimationState); /// Get the GL context fn get_gl_context(&self) -> GlContext; /// Get the native display fn get_native_display(&self) -> NativeDisplay; /// Get the GL api fn get_gl_api(&self) -> GlApi; } pub trait EmbedderMethods { /// Returns a thread-safe object to wake up the window's event loop. fn create_event_loop_waker(&mut self) -> Box<dyn EventLoopWaker>; /// Register services with a VRServiceManager. fn register_vr_services( &mut self, _: &mut VRServiceManager, _: &mut Vec<Box<dyn WebVRMainThreadHeartbeat>>, ) { } /// Register services with a WebXR Registry. fn register_webxr( &mut self, _: &mut webxr::MainThreadRegistry, _: WebGlExecutor, _: SurfaceProviders, ) { } } #[derive(Clone, Copy, Debug)] pub struct EmbedderCoordinates { /// The pixel density of the display. pub hidpi_factor: Scale<f32, DeviceIndependentPixel, DevicePixel>, /// Size of the screen. pub screen: DeviceIntSize, /// Size of the available screen space (screen without toolbars and docks). pub screen_avail: DeviceIntSize, /// Size of the native window. pub window: (DeviceIntSize, DeviceIntPoint), /// Size of the GL buffer in the window. pub framebuffer: DeviceIntSize, /// Coordinates of the document within the framebuffer. pub viewport: DeviceIntRect, } impl EmbedderCoordinates { pub fn get_flipped_viewport(&self) -> DeviceIntRect { let fb_height = self.framebuffer.height; let mut view = self.viewport.clone(); view.origin.y = fb_height - view.origin.y - view.size.height; DeviceIntRect::from_untyped(&view.to_untyped()) } }<|fim▁end|>
/// Make the OpenGL context current.
fn make_gl_context_current(&self);
<|file_name|>ObservationOfferingCache.java<|end_file_name|><|fim▁begin|>/** * Copyright (C) 2012 52°North Initiative for Geospatial Open Source Software GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.n52.sos.cache; import java.io.FileNotFoundException; import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import org.n52.oxf.valueDomains.time.ITimePeriod; import org.n52.sos.dataTypes.EnvelopeWrapper; import org.n52.sos.dataTypes.ObservationOffering; import org.n52.sos.db.AccessGDB; public class ObservationOfferingCache extends AbstractEntityCache<ObservationOffering> { private static final String TOKEN_SEP = "@@"; private static ObservationOfferingCache instance; public static synchronized ObservationOfferingCache instance(String dbName) throws FileNotFoundException { if (instance == null) { instance = new ObservationOfferingCache(dbName); } return instance; } public static synchronized ObservationOfferingCache instance() throws FileNotFoundException { return instance; } private boolean cancelled; private ObservationOfferingCache(String dbName) throws FileNotFoundException { super(dbName); } @Override protected String getCacheFileName() { return "observationOfferingsList.cache"; } @Override protected String serializeEntity(ObservationOffering entity) throws CacheException { StringBuilder sb = new StringBuilder(); sb.append(entity.getId()); sb.append(TOKEN_SEP); sb.append(entity.getName()); sb.append(TOKEN_SEP); sb.append(entity.getProcedureIdentifier()); sb.append(TOKEN_SEP); try { sb.append(EnvelopeEncoderDecoder.encode(entity.getObservedArea())); } catch (IOException e) { throw new CacheException(e); } sb.append(TOKEN_SEP); sb.append(Arrays.toString(entity.getObservedProperties())); sb.append(TOKEN_SEP); sb.append(TimePeriodEncoder.encode(entity.getTimeExtent())); return sb.toString(); } @Override protected ObservationOffering deserializeEntity(String line) { String[] values = line.split(TOKEN_SEP); if (values == null || values.length != 6) { return null; } String id = values[0].trim(); String name = values[1].trim(); String proc = values[2].trim(); EnvelopeWrapper env = EnvelopeEncoderDecoder.decode(values[3]); String[] props = decodeStringArray(values[4]); ITimePeriod time = TimePeriodEncoder.decode(values[5]); return new ObservationOffering(id, name, props, proc, env, time); } @Override protected boolean mergeWithPreviousEntries() { return true;<|fim▁hole|> this.cancelled = false; clearTempCacheFile(); geoDB.getOfferingAccess().getNetworksAsObservationOfferingsAsync(new OnOfferingRetrieved() { int count = 0; @Override public void retrieveExpectedOfferingsCount(int c) { setMaximumEntries(c); } @Override public void retrieveOffering(ObservationOffering oo, int currentOfferingIndex) throws RetrievingCancelledException { storeTemporaryEntity(oo); setLatestEntryIndex(currentOfferingIndex); LOGGER.info(String.format("Added ObservationOffering #%s to the cache.", count++)); if (cancelled) { throw new 
RetrievingCancelledException("Cache update cancelled due to shutdown."); } } }); return Collections.emptyList(); } @Override protected AbstractEntityCache<ObservationOffering> getSingleInstance() { return instance; } @Override public void cancelCurrentExecution() { this.cancelled = true; } }<|fim▁end|>
}

protected Collection<ObservationOffering> getCollectionFromDAO(AccessGDB geoDB) throws IOException {
<|file_name|>dojo.d.ts<|end_file_name|><|fim▁begin|>/************************************************************************/ /* Define core Dojo features */ /************************************************************************/ /// <reference path="dojo.types.d.ts"/> // dojo/ready declare module Dojo { interface Ready { (priority: number, context: Object, callback: SimpleAction): void; (context: Object, callback: SimpleAction): void; (callback: SimpleAction): void; } } declare module "dojo/ready" { var ready: Dojo.Ready; export = ready; } declare function require(module: "dojo/ready"): Dojo.Ready; // dojo/domReady declare module Dojo { interface DomReady { (callback: SimpleAction): void; } } declare module "dojo/domReady" { var domReady: Dojo.DomReady; export = domReady; } declare function require(module: "dojo/domReady"): Dojo.DomReady; // dojo/_base/array declare module Dojo { interface Array { indexOf<T>(array: T[], value: T, fromIndex?: number, findLast?: boolean): number; lastIndexOf<T>(array: T[], value: T, fromIndex?: number, findLast?: boolean): number; forEach<T>(array: T[], callback: string | ((item: T, index: number, array: T[]) => void), thisObject?: Object): void; forEach(array: string, callback: string | ((item: string, index: number, array: string) => void), thisObject?: Object): void; filter<T>(array: T[], callback: string | ((item: T, index: number, array: T[]) => boolean), thisObject?: Object): T[]; filter(array: string, callback: string | ((item: string, index: number, array: string) => boolean), thisObject?: Object): string[]; map<T, V>(array: T[], callback: string | ((item: T, index: number, array: T[]) => V), thisObject?: Object): V[]; map<V>(array: string, callback: string | ((item: string, index: number, array: string) => V), thisObject?: Object): V[]; some<T>(array: T[], callback: string | ((item: T, index: number, array: T[]) => boolean), thisObject?: Object): boolean; some(array: string, callback: string | ((item: string, index: number, array: string) => boolean), thisObject?: Object): boolean; every<T>(array: T[], callback: string | ((item: T, index: number, array: T[]) => boolean), thisObject?: Object): boolean; every(array: string, callback: string | ((item: string, index: number, array: string) => boolean), thisObject?: Object): boolean; } } declare module "dojo/_base/array" { var array: Dojo.Array; export = array; } declare function require(module: "dojo/_base/array"): Dojo.Array; // dojo/_base/browser declare module "dojo/_base/browser" { } declare function require(module: "dojo/_base/browser"): void; // dojo/_base/Color declare module Dojo { class Color { constructor(); constructor(colors: number[]); constructor(colors: [number, number, number]); constructor(colors: [number, number, number, number]); constructor(color: string); constructor(color: { r: number, g: number, b: number, a?: number }); a: number; r: number; g: number; b: number; toRgb(): [number, number, number]; toRgba(): [number, number, number, number]; toHex(): string; toCss(alpha?: boolean): string; } module Color { interface NamedColors { "aliceblue": [number, number, number]; "antiquewhite": [number, number, number]; "aquamarine": [number, number, number]; "azure": [number, number, number]; "beige": [number, number, number]; "bisque": [number, number, number]; "blanchedalmond": [number, number, number]; "blueviolet": [number, number, number]; "brown": [number, number, number]; "burlywood": [number, number, number]; "cadetblue": [number, number, number]; "chartreuse": [number, number, 
number]; "chocolate": [number, number, number]; "coral": [number, number, number]; "cornflowerblue": [number, number, number]; "cornsilk": [number, number, number]; "crimson": [number, number, number]; "cyan": [number, number, number]; "darkblue": [number, number, number]; "darkcyan": [number, number, number]; "darkgoldenrod": [number, number, number]; "darkgray": [number, number, number]; "darkgreen": [number, number, number]; "darkgrey": [number, number, number]; "darkkhaki": [number, number, number]; "darkmagenta": [number, number, number]; "darkolivegreen": [number, number, number]; "darkorange": [number, number, number]; "darkorchid": [number, number, number]; "darkred": [number, number, number]; "darksalmon": [number, number, number]; "darkseagreen": [number, number, number]; "darkslateblue": [number, number, number]; "darkslategray": [number, number, number]; "darkslategrey": [number, number, number]; "darkturquoise": [number, number, number]; "darkviolet": [number, number, number]; "deeppink": [number, number, number]; "deepskyblue": [number, number, number]; "dimgray": [number, number, number]; "dimgrey": [number, number, number]; "dodgerblue": [number, number, number]; "firebrick": [number, number, number]; "floralwhite": [number, number, number]; "forestgreen": [number, number, number]; "gainsboro": [number, number, number]; "ghostwhite": [number, number, number]; "gold": [number, number, number]; "goldenrod": [number, number, number]; "greenyellow": [number, number, number]; "grey": [number, number, number]; "honeydew": [number, number, number]; "hotpink": [number, number, number]; "indianred": [number, number, number]; "indigo": [number, number, number]; "ivory": [number, number, number]; "khaki": [number, number, number]; "lavender": [number, number, number]; "lavenderblush": [number, number, number]; "lawngreen": [number, number, number]; "lemonchiffon": [number, number, number]; "lightblue": [number, number, number]; "lightcoral": [number, number, number]; "lightcyan": [number, number, number]; "lightgoldenrodyellow": [number, number, number]; "lightgray": [number, number, number]; "lightgreen": [number, number, number]; "lightgrey": [number, number, number]; "lightpink": [number, number, number]; "lightsalmon": [number, number, number]; "lightseagreen": [number, number, number]; "lightskyblue": [number, number, number]; "lightslategray": [number, number, number]; "lightslategrey": [number, number, number]; "lightsteelblue": [number, number, number]; "lightyellow": [number, number, number]; "limegreen": [number, number, number]; "linen": [number, number, number]; "magenta": [number, number, number]; "mediumaquamarine": [number, number, number]; "mediumblue": [number, number, number]; "mediumorchid": [number, number, number]; "mediumpurple": [number, number, number]; "mediumseagreen": [number, number, number]; "mediumslateblue": [number, number, number]; "mediumspringgreen": [number, number, number]; "mediumturquoise": [number, number, number]; "mediumvioletred": [number, number, number]; "midnightblue": [number, number, number]; "mintcream": [number, number, number]; "mistyrose": [number, number, number]; "moccasin": [number, number, number]; "navajowhite": [number, number, number]; "oldlace": [number, number, number]; "olivedrab": [number, number, number]; "orange": [number, number, number]; "orangered": [number, number, number]; "orchid": [number, number, number]; "palegoldenrod": [number, number, number]; "palegreen": [number, number, number]; "paleturquoise": [number, 
number, number]; "palevioletred": [number, number, number]; "papayawhip": [number, number, number]; "peachpuff": [number, number, number]; "peru": [number, number, number]; "pink": [number, number, number]; "plum": [number, number, number]; "powderblue": [number, number, number]; "rosybrown": [number, number, number]; "royalblue": [number, number, number]; "saddlebrown": [number, number, number]; "salmon": [number, number, number]; "sandybrown": [number, number, number]; "seagreen": [number, number, number]; "seashell": [number, number, number]; "sienna": [number, number, number]; "skyblue": [number, number, number]; "slateblue": [number, number, number]; "slategray": [number, number, number]; "slategrey": [number, number, number]; "snow": [number, number, number]; "springgreen": [number, number, number]; "steelblue": [number, number, number]; "tan": [number, number, number]; "thistle": [number, number, number]; "tomato": [number, number, number]; "turquoise": [number, number, number]; "violet": [number, number, number]; "wheat": [number, number, number]; "whitesmoke": [number, number, number]; "yellowgreen": [number, number, number]; } var named: NamedColors; } } declare module "dojo/_base/Color" { class color extends Dojo.Color { } module color { var named: Dojo.Color.NamedColors; } export = color; } declare function require(module: "dojo/_base/Color"): Dojo.Color; // dojo/_base/config declare module Dojo { interface Config { addOnLoad: Object; // TODO afterOnLoad: string; baseUrl: string; callback: Function; debugContainerId: string; debugHeight: number; defaultDuration: number; deferredInstrumentation: string; deps: string[]; dojoBlankHtmlUrl: string; extraLocale: string[]; ioPublish: boolean; isDebug: boolean; locale: string; modulePaths: { [path: string]: string; }; parseOnLoad: boolean; require: string[]; transparentColor: [number, number, number]; urchin: string; useCustomLogger: boolean; useDeferredInstrumentation: Object; } } declare module "dojo/_base/config" { var config: Dojo.Config; export = config; } declare function require(module: "dojo/_base/config"): Dojo.Config; // dojo/_base/fx declare module Dojo { module Fx { interface BaseCreateOptions { node: any; duration?: number; easing?: EasingFunction; } interface CreateOptions extends BaseCreateOptions { properties: StylesMap; } interface Base { anim(nodeId: string | HTMLElement, properties: PropertiesMap, duration?: number, easing?: EasingFunction, onEnd?: SimpleAction, delay?: number): dojo.Animation; animateProperty(args: CreateOptions): dojo.Animation; fadeIn(args: BaseCreateOptions): dojo.Animation; fadeOut(args: BaseCreateOptions): dojo.Animation; } } } declare module "dojo/_base/fx" { var fx: Dojo.Fx.Base; export = fx; } declare function require(module: "dojo/_base/fx"): Dojo.Fx.Base; // dojo/_base/lang declare module Dojo { interface Lang { clone<T>(obj: T): T; delegate(obj: Object, props: PropertiesMap): Object; exists(path: string, root?: Object): boolean; extend<T extends Object>(ctor: T, ...props: PropertiesMap[]): T; getObject(path: string, create?: boolean, context?: Object): Object; hitch<F extends Function>(scope: Object, method: string | F): F; mixin<T extends Object>(dest: T, ...sources: PropertiesMap[]): T; partial<F extends Function>(method: string | F, ...v_args: any[]): F; replace(tmpl: string, map: string[] | Dictionary<any> | ((matched: string, key: string, offset: number, template: string) => string), pattern?: string): string; setObject(path: string, value: any, thisObject?: Object): Object; 
// dojo/_base/lang
declare module Dojo {
    interface Lang {
        clone<T>(obj: T): T;
        delegate(obj: Object, props: PropertiesMap): Object;
        exists(path: string, root?: Object): boolean;
        extend<T extends Object>(ctor: T, ...props: PropertiesMap[]): T;
        getObject(path: string, create?: boolean, context?: Object): Object;
        hitch<F extends Function>(scope: Object, method: string | F): F;
        mixin<T extends Object>(dest: T, ...sources: PropertiesMap[]): T;
        partial<F extends Function>(method: string | F, ...v_args: any[]): F;
        replace(tmpl: string, map: string[] | Dictionary<any> | ((matched: string, key: string, offset: number, template: string) => string), pattern?: string): string;
        setObject(path: string, value: any, thisObject?: Object): Object;
        trim(str: string): string;
        // NOTE: The following functions are deprecated
        isString(v: any): boolean;
        isArray(v: any): boolean;
        isFunction(v: any): boolean;
        isArrayLike(v: any): boolean;
        isObject(v: any): boolean;
    }
}
declare module "dojo/_base/lang" { var lang: Dojo.Lang; export = lang; }
declare function require(module: "dojo/_base/lang"): Dojo.Lang;

// dojo/AdapterRegistry
declare module Dojo {
    class AdapterRegistry {
        constructor(returnWrappers?: boolean);
        pairs: any[];
        returnWrappers: boolean;
        match(...args: any[]): void;
        register(name: string, check: FunctionReturning<boolean>, wrap: Action, directReturn?: boolean, override?: boolean): void;
        unregister(name: string): boolean;
    }
}
declare module "dojo/AdapterRegistry" { export = Dojo.AdapterRegistry; }
declare function require(module: "dojo/AdapterRegistry"): Dojo.AdapterRegistry;

// dojo/aspect
declare module Dojo {
    interface Aspect {
        after(target: Object, methodName: string, advice: (x: any) => any): RemovableHandle;
        after(target: Object, methodName: string, advice: Function, receiveArguments: boolean): RemovableHandle;
        around(target: Object, methodName: string, advice: (fn: Function) => Function): RemovableHandle;
        before(target: Object, methodName: string, advice: FunctionReturning<any[]>): RemovableHandle;
    }
}
declare module "dojo/aspect" { var aspect: Dojo.Aspect; export = aspect; }
declare function require(module: "dojo/aspect"): Dojo.Aspect;

// dojo/back
declare module Dojo {
    interface HistoryState {
        back?: (direction: string) => void;
        forward?: (direction: string) => void;
        changeUrl?: any;
    }
    interface Back {
        init(): void;
        addToHistory(args: HistoryState): void;
        getHash(): any;
    }
}
declare module "dojo/back" { var back: Dojo.Back; export = back; }
declare function require(module: "dojo/back"): Dojo.Back;

// dojo.cache
declare module Dojo {
    interface Cache {
        <T>(url: string): T;
        <T>(url: string, value: T): void;
        <T>(module: string, url: string): T;
        <T>(module: string, url: string, value: T): void;
    }
}
declare module "dojo/cache" { var cache: Dojo.Cache; export = cache; }
declare function require(module: "dojo/cache"): Dojo.Cache;

// dojo/cldr/monetary
declare module Dojo {
    module Cldr {
        interface Monetary {
            getData(code: string): string;
        }
    }
}
declare module "dojo/cldr/monetary" { var monetary: Dojo.Cldr.Monetary; export = monetary; }
declare function require(module: "dojo/cldr/monetary"): Dojo.Cldr.Monetary;

// dojo/cldr/supplemental
declare module Dojo {
    module Cldr {
        interface Supplemental {
            getFirstDayOfWeek(locale?: string): number;
            getWeekend(locale?: string): { start: number; end: number; };
        }
    }
}
declare module "dojo/cldr/supplemental" { var supplemental: Dojo.Cldr.Supplemental; export = supplemental; }
declare function require(module: "dojo/cldr/supplemental"): Dojo.Cldr.Supplemental;

// dojo/cookie
declare module Dojo {
    interface Cookie {
        (name: string): string;
        (name: string, value: string, props?: { expires?: any; path?: string; domain?: string; secure?: boolean; }): void;
        isSupported(): boolean;
    }
}
declare module "dojo/cookie" { var cookie: Dojo.Cookie; export = cookie; }
declare function require(module: "dojo/cookie"): Dojo.Cookie;

// dojo/currency
declare module Dojo {
    interface _CurrencyFormatOptions {
        currency?: string;
        fractional?: boolean;
        locale?: string;
        pattern?: string;
        places?: number;
        round?: number;
        symbol?: string;
        type?: string;
        strict?: boolean;
    }
    interface Currency {
        format(value: number, options?: _CurrencyFormatOptions): string;
        parse(expression: string, options?: _CurrencyFormatOptions): number;
        regexp(options?: _CurrencyFormatOptions): RegExp;
    }
}
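// Illustrative usage sketch (not part of the declarations); the amount and
// option values are made up:
//   require(["dojo/currency"], function (currency) {
//       var text = currency.format(1234.5, { currency: "USD", places: 2 });
//       var num = currency.parse(text, { currency: "USD" });
//   });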
declare module "dojo/currency" { var currency: Dojo.Currency; export = currency; } declare function require(module: "dojo/currency"): Dojo.Currency; // dojo/date interface _HTMLDate extends Date { } declare module Dojo { module Date { interface Base { add(date: _HTMLDate, interval: string, amount: number): _HTMLDate; compare(date1: _HTMLDate, date2?: _HTMLDate, portion?: string): number; difference(date1: _HTMLDate, date2?: _HTMLDate, interval?: string): number; getDaysInMonth(dateObject: _HTMLDate): number; getTimezoneName(dateObject: _HTMLDate): string; isLeapYear(dateObject: _HTMLDate): boolean; } } } declare module "dojo/date" { var date: Dojo.Date.Base; export = date; } declare function require(module: "dojo/date"): Dojo.Date.Base; // dojo/date/stamp declare module Dojo { module Date { interface Stamp { fromISOString(formattedString: string, defaultTime?: number): _HTMLDate; toISOString(dateObject: _HTMLDate, options?: { selector?: string; zulu?: boolean; milliseconds?: number; }): string; } } } declare module "dojo/date/stamp" { var stamp: Dojo.Date.Stamp; export = stamp; } declare function require(module: "dojo/date/stamp"): Dojo.Date.Stamp; // dojo/date/locale declare module Dojo { module Date { interface FormatOptions { selector?: string; formatLength?: string; datePattern?: string; timePattern?: string; am?: string; pm?: string; locale?: string; fullYear?: boolean; strict?: boolean; } interface Locale { addCustomFormats(packageName: string, bundleName: string): void; format(dateObject: _HTMLDate, options?: FormatOptions): string; getNames(item: string, type: string, context?: string, locale?: string): string[]; isWeekend(dateObject?: _HTMLDate, locale?: string): boolean; parse(value: string, options?: FormatOptions): _HTMLDate; regexp(options?: FormatOptions): RegExp; } } } declare module "dojo/date/locale" { var locale: Dojo.Date.Locale; export = locale; } declare function require(module: "dojo/date/locale"): Dojo.Date.Locale; // dojo/Deferred declare module dojo { class Deferred<T> { constructor(canceler?: (reason: any) => void); promise: Promise<T>; isCanceled(): boolean; isFulfilled(): boolean; isRejected(): boolean; isResolved(): boolean; progress(update: any, strict?: boolean): void; reject(reason: any, strict?: boolean): void; resolve(value: T, strict?: boolean): void; cancel(reason: any, strict?: boolean): any; then<V>(callback?: (value: T) => V | Promise<V> | Deferred<V>, errback?: (error: any) => void, progback?: (progress: any) => void): Promise<V>; } } declare module "dojo/Deferred" { var Deferred: typeof dojo.Deferred; export = Deferred; } declare function require<T>(depends: "dojo/Deferred"): dojo.Deferred<T>; // dojo/dom declare module Dojo { interface Dom { byId<T extends HTMLElement>(node: string | T): T; isDescendant(nbode: string | HTMLElement, ancestor: string | HTMLElement): boolean; setSelectable(node: string | HTMLElement, selectable: boolean): void; } } declare module "dojo/dom" { var dom: Dojo.Dom; export = dom; } declare function require(module: "dojo/dom"): Dojo.Dom; // dojo/dom-attr declare module Dojo { interface DomAttr { has(node: string | HTMLElement, attr: string): boolean; get(node: string | HTMLElement, attr: string): string; set(node: string | HTMLElement, attr: string, value: string): void; set(node: string | HTMLElement, values: AttributesMap): void; remove(node: string | HTMLElement, attr: string): string; getNodeProp(node: string | HTMLElement, attr: string): any; } } declare module "dojo/dom-attr" { var domAttr: Dojo.DomAttr; export = 
// dojo/dom
declare module Dojo {
    interface Dom {
        byId<T extends HTMLElement>(node: string | T): T;
        isDescendant(node: string | HTMLElement, ancestor: string | HTMLElement): boolean;
        setSelectable(node: string | HTMLElement, selectable: boolean): void;
    }
}
declare module "dojo/dom" { var dom: Dojo.Dom; export = dom; }
declare function require(module: "dojo/dom"): Dojo.Dom;

// dojo/dom-attr
declare module Dojo {
    interface DomAttr {
        has(node: string | HTMLElement, attr: string): boolean;
        get(node: string | HTMLElement, attr: string): string;
        set(node: string | HTMLElement, attr: string, value: string): void;
        set(node: string | HTMLElement, values: AttributesMap): void;
        remove(node: string | HTMLElement, attr: string): string;
        getNodeProp(node: string | HTMLElement, attr: string): any;
    }
}
declare module "dojo/dom-attr" { var domAttr: Dojo.DomAttr; export = domAttr; }
declare function require(module: "dojo/dom-attr"): Dojo.DomAttr;

// dojo/dom-class
declare module Dojo {
    interface DomClass {
        contains(node: string | HTMLElement, className: string): boolean;
        add(node: string | HTMLElement, classNames: string | string[]): void;
        remove(node: string | HTMLElement, classNames?: string | string[]): void;
        replace(node: string | HTMLElement, addClassNames: string | string[], removeClassNames: string | string[]): void;
        toggle(node: string | HTMLElement, className: string, addRemove?: boolean): void;
    }
}
declare module "dojo/dom-class" { var domClass: Dojo.DomClass; export = domClass; }
declare function require(module: "dojo/dom-class"): Dojo.DomClass;

// dojo/dom-construct
declare module Dojo {
    interface DomConstruct {
        toDom(frag: string, doc?: HTMLDocument): HTMLElement;
        place<T extends HTMLElement>(node: string | T, refNode: string | HTMLElement, pos?: string | number): T;
        create(id: "a", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLAnchorElement;
        create(id: "abbr", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "address", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "area", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLAreaElement;
        create(id: "article", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "aside", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "audio", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLAudioElement;
        create(id: "b", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "base", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLBaseElement;
        create(id: "bdi", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "bdo", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "blockquote", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLQuoteElement;
        create(id: "body", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLBodyElement;
        create(id: "br", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLBRElement;
        create(id: "button", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLButtonElement;
        create(id: "canvas", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLCanvasElement;
        create(id: "caption", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLTableCaptionElement;
        create(id: "cite", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "code", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "col", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLTableColElement;
        create(id: "colgroup", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLTableColElement;
        create(id: "datalist", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLDataListElement;
        create(id: "dd", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "del", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLModElement;
        create(id: "dfn", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "div", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLDivElement;
        create(id: "dl", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLDListElement;
        create(id: "dt", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "em", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "embed", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLEmbedElement;
        create(id: "fieldset", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLFieldSetElement;
        create(id: "figcaption", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "figure", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "footer", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "form", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLFormElement;
        create(id: "h1", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLHeadingElement;
        create(id: "h2", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLHeadingElement;
        create(id: "h3", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLHeadingElement;
        create(id: "h4", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLHeadingElement;
        create(id: "h5", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLHeadingElement;
        create(id: "h6", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLHeadingElement;
        create(id: "head", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLHeadElement;
        create(id: "header", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "hgroup", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "hr", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLHRElement;
        create(id: "html", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLHtmlElement;
        create(id: "i", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "iframe", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLIFrameElement;
        create(id: "img", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLImageElement;
        create(id: "input", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLInputElement;
        create(id: "ins", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLModElement;
        create(id: "kbd", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "label", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLLabelElement;
        create(id: "legend", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLLegendElement;
        create(id: "li", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLLIElement;
        create(id: "link", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLLinkElement;
        create(id: "main", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "map", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLMapElement;
        create(id: "mark", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "menu", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLMenuElement;
        create(id: "meta", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLMetaElement;
        create(id: "nav", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "noscript", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "object", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLObjectElement;
        create(id: "ol", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLOListElement;
        create(id: "optgroup", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLOptGroupElement;
        create(id: "option", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLOptionElement;
        create(id: "p", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLParagraphElement;
        create(id: "param", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLParamElement;
        create(id: "pre", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLPreElement;
        create(id: "progress", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLProgressElement;
        create(id: "q", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLQuoteElement;
        create(id: "rp", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "rt", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "ruby", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "s", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "samp", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "script", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLScriptElement;
        create(id: "section", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "select", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLSelectElement;
        create(id: "small", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "source", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLSourceElement;
        create(id: "span", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLSpanElement;
        create(id: "strong", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "style", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLStyleElement;
        create(id: "sub", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "summary", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "sup", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "table", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLTableElement;
        create(id: "tbody", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLTableSectionElement;
        create(id: "td", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLTableDataCellElement;
        create(id: "textarea", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLTextAreaElement;
        create(id: "tfoot", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLTableSectionElement;
        create(id: "th", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLTableHeaderCellElement;
        create(id: "thead", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLTableSectionElement;
        create(id: "title", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLTitleElement;
        create(id: "tr", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLTableRowElement;
        create(id: "track", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLTrackElement;
        create(id: "u", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "ul", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLUListElement;
        create(id: "var", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: "video", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLVideoElement;
        create(id: "wbr", attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create(id: string, attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): HTMLElement;
        create<T extends HTMLElement>(tag: T, attrs?: AttributesMap, refNode?: string | HTMLElement, pos?: string): T;
        empty(node: string | HTMLElement): void;
        destroy(node: string | HTMLElement): void;
    }
}
declare module "dojo/dom-construct" { var domConstruct: Dojo.DomConstruct; export = domConstruct; }
declare function require(module: "dojo/dom-construct"): Dojo.DomConstruct;
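// Illustrative usage sketch (not part of the declarations); the node id
// "container" and the attribute map are made up:
//   require(["dojo/dom-construct"], function (domConstruct) {
//       var div = domConstruct.create("div", { "class": "note" }, "container", "first");
//       domConstruct.place(div, "container", "last");
//   });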
// dojo/dom-form
declare module Dojo {
    interface DomForm {
        fieldToObject(inputNode: string | HTMLElement): any;
        toJson(formNode: string | HTMLElement, prettyPrint?: boolean): string;
        toObject(formNode: string | HTMLElement): Object;
        toQuery(formNode: string | HTMLElement): string;
    }
}
declare module "dojo/dom-form" { var domForm: Dojo.DomForm; export = domForm; }
declare function require(module: "dojo/dom-form"): Dojo.DomForm;

// dojo/dom-geometry
declare module Dojo {
    interface DomGeometry {
        boxModel: string;
        docScroll(doc?: HTMLDocument): { node: HTMLElement; x: number; y: number; };
        fixIeBiDiScrollLeft(scrollLeft: number, doc?: HTMLDocument): number;
        getBorderExtents(node: HTMLElement, computedStyle?: StylesMap): Box;
        getContentBox(node: HTMLElement, computedStyle?: StylesMap): Position;
        getIeDocumentElementOffset(doc?: HTMLDocument): Point;
        getMarginBox(node: HTMLElement, computedStyle?: StylesMap): Position;
        getMarginExtents(node: HTMLElement, computedStyle?: StylesMap): Position;
        getMarginSize(node: HTMLElement, computedStyle?: StylesMap): Size;
        getPadBorderExtents(node: HTMLElement, computedStyle?: StylesMap): Box;
        getPadExtents(node: HTMLElement, computedStyle?: StylesMap): Box;
        isBodyLtr(doc?: HTMLDocument): boolean;
        normalizeEvent(event: { pageX?: number; pageY?: number; offsetX?: number; offsetY?: number; layerX?: number; layerY?: number; }): void;
        position(node: string | HTMLElement, includeScroll?: boolean): Position;
        setContentSize(node: HTMLElement, box: Size, computedStyle?: StylesMap): void;
        setMarginBox(node: HTMLElement, box: Size, computedStyle?: StylesMap): void;
    }
}
declare module "dojo/dom-geometry" { var domGeometry: Dojo.DomGeometry; export = domGeometry; }
declare function require(module: "dojo/dom-geometry"): Dojo.DomGeometry;

// dojo/dom-prop
declare module Dojo {
    interface DomProp {
        get(node: string | HTMLElement, attr: string): string;
        set(node: string | HTMLElement, attr: string, value: string): void;
        set(node: string | HTMLElement, values: AttributesMap): void;
    }
}
declare module "dojo/dom-prop" { var domProp: Dojo.DomProp; export = domProp; }
declare function require(module: "dojo/dom-prop"): Dojo.DomProp;

// dojo/dom-style
declare module Dojo {
    interface DomStyle {
        getComputedStyle(node: HTMLElement): StylesMap;
        get(node: string | HTMLElement, style?: string): StylesMap;
        set(node: string | HTMLElement, style: string, value: string): void;
        set(node: string | HTMLElement, values: StylesMap): void;
    }
}
declare module "dojo/dom-style" { var domStyle: Dojo.DomStyle; export = domStyle; }
declare function require(module: "dojo/dom-style"): Dojo.DomStyle;

// dojo/Evented
declare module "dojo/Evented" { var Evented: dojo.Evented; export = Evented; }
declare function require(module: "dojo/Evented"): dojo.Evented;

// dojo/fx
declare module Dojo {
    module Fx {
        interface SlideCreateOptions extends CreateOptions {
            top: string;
            left: string;
        }
        interface AutoSlideCreateOptions extends SlideCreateOptions {
            auto: any;
        }
        interface Module {
            chain(animations: dojo.Animation[]): dojo.Animation;
            combine(animations: dojo.Animation[]): dojo.Animation;
            slideTo(args: SlideCreateOptions): dojo.Animation;
            Toggler: new (args: TogglerCreateOptions) => Toggler;
            wipeIn(args: CreateOptions): dojo.Animation;
            wipeOut(args: CreateOptions): dojo.Animation;
        }
    }
}
declare module "dojo/fx" { var fx: Dojo.Fx.Module; export = fx; }
declare function require(module: "dojo/fx"): Dojo.Fx.Module;

// dojo/fx/easing
declare module Dojo {
    module Fx {
        interface Easing {
            backIn(n: number): number; backInOut(n: number): number; backOut(n: number): number;
            bounceIn(n: number): number; bounceInOut(n: number): number; bounceOut(n: number): number;
            circIn(n: number): number; circInOut(n: number): number; circOut(n: number): number;
            cubicIn(n: number): number; cubicInOut(n: number): number; cubicOut(n: number): number;
            elasticIn(n: number): number; elasticInOut(n: number): number; elasticOut(n: number): number;
            expoIn(n: number): number; expoInOut(n: number): number; expoOut(n: number): number;
            linearIn(n: number): number; linearInOut(n: number): number; linearOut(n: number): number;
            quadIn(n: number): number; quadInOut(n: number): number; quadOut(n: number): number;
            quartIn(n: number): number; quartInOut(n: number): number; quartOut(n: number): number;
            quintIn(n: number): number; quintInOut(n: number): number; quintOut(n: number): number;
            sineIn(n: number): number; sineInOut(n: number): number; sineOut(n: number): number;
        }
    }
}
declare module "dojo/fx/easing" { var easing: Dojo.Fx.Easing; export = easing; }
declare function require(module: "dojo/fx/easing"): Dojo.Fx.Easing;

// dojo/fx/Toggler
declare module Dojo {
    module Fx {
        interface TogglerCreateOptions {
            node: any;
            showDuration?: number;
            showFunc?: (args: BaseCreateOptions) => dojo.Animation;
            hideDuration?: number;
            hideFunc?: (args: BaseCreateOptions) => dojo.Animation;
        }
        class Toggler {
            constructor(args: TogglerCreateOptions);
            hideDuration: number;
            node: HTMLElement;
            showDuration: number;
            hide(delay?: number): dojo.Animation;
            hideFunc(args?: { node: any; duration?: number; easing: EasingFunction; }): dojo.Animation;
            show(delay?: number): dojo.Animation;
            showFunc(args?: { node: any; duration?: number; easing: EasingFunction; }): dojo.Animation;
        }
    }
}
declare module "dojo/fx/Toggler" { var Toggler: typeof Dojo.Fx.Toggler; export = Toggler; }
declare function require(module: "dojo/fx/Toggler"): Dojo.Fx.Toggler;
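// Illustrative usage sketch (not part of the declarations); the node id
// "panel" and the durations are made up:
//   require(["dojo/fx/Toggler"], function (Toggler) {
//       var t = new Toggler({ node: "panel", showDuration: 200, hideDuration: 200 });
//       t.show();
//   });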
// dojo/has
declare module Dojo {
    interface Has {
        (feature: string | number): any;
        add(feature: string | number, test: (global: Object, doc: Document, element: Object) => boolean, now?: boolean, force?: boolean): void;
        clearElement(element: Object): void;
        load<T>(id: string, parentRequire: Function, loaded: (m: T) => void): void;
        normalize(id: number, toAbsMid: (id: number) => number): void;
    }
}
declare module "dojo/has" { var has: Dojo.Has; export = has; }
declare function require(module: "dojo/has"): Dojo.Has;

// dojo/hash
declare module Dojo {
    interface Hash {
        (hash?: string, replace?: boolean): string;
    }
}
declare module "dojo/hash" { var hash: Dojo.Hash; export = hash; }
declare function require(module: "dojo/hash"): Dojo.Hash;

// dojo/html
declare module Dojo {
    interface _HtmlContentSetterOptions {
        cleanContent?: boolean;
        extractContent?: boolean;
        parseContent?: boolean;
        parserScope?: string;
        startup?: boolean;
        onBegin(): void;
        onContentError(err: any): string;
        onEnd(): void;
    }
    interface Html {
        set(node: HTMLElement, content: string | HTMLElement | NodeList | dojo.NodeList | HTMLElement[], params?: _HtmlContentSetterOptions): void;
    }
}
declare module "dojo/html" { var html: Dojo.Html; export = html; }
declare function require(module: "dojo/html"): Dojo.Html;

// dojo/io-query
declare module Dojo {
    interface IOQuery {
        objectToQuery(map: Dojo.Dictionary<any>): string;
        queryToObject(str: string): Dojo.Dictionary<string>;
    }
}
declare module "dojo/io-query" { var ioQuery: Dojo.IOQuery; export = ioQuery; }
declare function require(module: "dojo/io-query"): Dojo.IOQuery;

// dojo/keys
declare module Dojo {
    interface Keys {
        BACKSPACE: number; TAB: number; CLEAR: number; ENTER: number; SHIFT: number;
        CTRL: number; ALT: number; META: number; PAUSE: number; CAPS_LOCK: number;
        ESCAPE: number; SPACE: number; PAGE_UP: number; PAGE_DOWN: number;
        END: number; HOME: number;
        LEFT_ARROW: number; UP_ARROW: number; RIGHT_ARROW: number; DOWN_ARROW: number;
        INSERT: number; DELETE: number; HELP: number;
        LEFT_WINDOW: number; RIGHT_WINDOW: number; SELECT: number;
        NUMPAD_0: number; NUMPAD_1: number; NUMPAD_2: number; NUMPAD_3: number; NUMPAD_4: number;
        NUMPAD_5: number; NUMPAD_6: number; NUMPAD_7: number; NUMPAD_8: number; NUMPAD_9: number;
        NUMPAD_MULTIPLY: number; NUMPAD_PLUS: number; NUMPAD_ENTER: number;
        NUMPAD_MINUS: number; NUMPAD_PERIOD: number; NUMPAD_DIVIDE: number;
        F1: number; F2: number; F3: number; F4: number; F5: number; F6: number; F7: number; F8: number;
        F9: number; F10: number; F11: number; F12: number; F13: number; F14: number; F15: number;
        NUM_LOCK: number; SCROLL_LOCK: number; copyKey: number;
    }
}
declare module "dojo/keys" { var keys: Dojo.Keys; export = keys; }
declare function require(module: "dojo/keys"): Dojo.Keys;

// dojo/json
declare module Dojo {
    interface Json {
        parse(str: string, secured?: boolean): any;
        stringify(obj: any, replacer?: any[] | ((key: string, value: string) => any), space?: boolean): string;
    }
}
declare module "dojo/json" { var json: Dojo.Json; export = json; }
declare function require(module: "dojo/json"): Dojo.Json;

// dojo/mouse
declare module Dojo {
    interface Mouse {
        enter: ExtensionEvent;
        leave: ExtensionEvent;
        isLeft(event: MouseEvent): boolean;
        isMiddle(event: MouseEvent): boolean;
        isRight(event: MouseEvent): boolean;
    }
}
declare module "dojo/mouse" { var mouse: Dojo.Mouse; export = mouse; }
declare function require(module: "dojo/mouse"): Dojo.Mouse;
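// Illustrative usage sketch (not part of the declarations); the node id
// "menu" is made up, and mouse.enter is used as an extension event with
// the dojo/on module declared further below:
//   require(["dojo/on", "dojo/mouse", "dojo/dom"], function (on, mouse, dom) {
//       on(dom.byId("menu"), mouse.enter, function () { /* hover in */ });
//   });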
// dojo/NodeList
interface _HTMLNodeList extends NodeList { }
declare module Dojo {
    module Fx {
        interface AutoCreateOptions extends CreateOptions { auto: any; }
        interface AutoBaseCreateOptions extends BaseCreateOptions { auto: any; }
    }
}
declare module dojo {
    type NodesLike = HTMLElement | HTMLElement[] | _HTMLNodeList | dojo.NodeList;
    class NodeList {
        constructor(node: NodesLike);
        // Make NodeList array-like
        length: number;
        [index: number]: HTMLElement;
        addClass(classNames: string | string[]): dojo.NodeList;
        addClassFx(classNames: string | string[], options?: Dojo.Fx.CreateOptions): Animation;
        addContent(content: string | NodesLike | { template: string; parse?: boolean; templateFunc?: (template: string, content: Object) => Object; }, position?: string | number): dojo.NodeList;
        adopt(nodes: string | NodesLike, position?: string | number): dojo.NodeList;
        after(content: string | Object | NodesLike): dojo.NodeList;
        andSelf(): dojo.NodeList;
        anim(properties: Dojo.PropertiesMap, duration?: number, easing?: Dojo.Fx.EasingFunction, onEnd?: Dojo.SimpleAction, delay?: number): Animation;
        animateProperty(args: Dojo.Fx.AutoCreateOptions | Dojo.Fx.CreateOptions): dojo.NodeList;
        append(content: string | Object | NodesLike): dojo.NodeList;
        appendTo(query: string): dojo.NodeList;
        at(...index: number[]): dojo.NodeList;
        attr(property: string): string[];
        attr(property: string, value: string): dojo.NodeList;
        before(content: string | Object | NodesLike): dojo.NodeList;
        children(query?: string): dojo.NodeList;
        clone(): dojo.NodeList;
        closest(query: string, root?: string | Object): HTMLElement;
        concat(...items: HTMLElement[]): dojo.NodeList;
        connect(methodName: string, func: string | Function): dojo.NodeList;
        connect(methodName: string, obj: Object, funcName: string): dojo.NodeList;
        data(key: Dojo.PropertiesMap): dojo.NodeList;
        data(key: string): any;
        data(key: string, value: any): dojo.NodeList;
        delegate(selector: string, eventName: "abort", listener: Dojo.EventListener<UIEvent>): dojo.NodeList;
        delegate(selector: string, eventName: "afterprint", listener: EventListener): dojo.NodeList;
        delegate(selector: string, eventName: "beforeprint", listener: EventListener): dojo.NodeList;
        delegate(selector: string, eventName: "beforeunload", listener: Dojo.EventListener<BeforeUnloadEvent>): dojo.NodeList;
        delegate(selector: string, eventName: "blur", listener: Dojo.EventListener<FocusEvent>): dojo.NodeList;
        delegate(selector: string, eventName: "canplay", listener: EventListener): dojo.NodeList;
        delegate(selector: string, eventName: "canplaythrough", listener: EventListener): dojo.NodeList;
        delegate(selector: string, eventName: "change", listener: EventListener): dojo.NodeList;
        delegate(selector: string, eventName: "click", listener: Dojo.EventListener<MouseEvent>): dojo.NodeList;
        delegate(selector: string, eventName: "contextmenu", listener: Dojo.EventListener<MouseEvent>): dojo.NodeList;
        delegate(selector: string, eventName: "dblclick", listener: Dojo.EventListener<MouseEvent>): dojo.NodeList;
        delegate(selector: string, eventName: "drag", listener: Dojo.EventListener<DragEvent>): dojo.NodeList;
        delegate(selector: string, eventName: "dragend", listener: Dojo.EventListener<DragEvent>): dojo.NodeList;
        delegate(selector: string, eventName: "dragenter", listener: Dojo.EventListener<DragEvent>): dojo.NodeList;
        delegate(selector: string, eventName: "dragleave", listener: Dojo.EventListener<DragEvent>): dojo.NodeList;
        delegate(selector: string, eventName: "dragover", listener: Dojo.EventListener<DragEvent>): dojo.NodeList;
        delegate(selector: string, eventName: "dragstart", listener: Dojo.EventListener<DragEvent>): dojo.NodeList;
        delegate(selector: string, eventName: "drop", listener: Dojo.EventListener<DragEvent>):
dojo.NodeList; delegate(selector: string, eventName: "durationchange", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "emptied", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "ended", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "focus", listener: Dojo.EventListener<FocusEvent>): dojo.NodeList; delegate(selector: string, eventName: "hashchange", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "input", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "keydown", listener: Dojo.EventListener<KeyboardEvent>): dojo.NodeList; delegate(selector: string, eventName: "keypress", listener: Dojo.EventListener<KeyboardEvent>): dojo.NodeList; delegate(selector: string, eventName: "keyup", listener: Dojo.EventListener<KeyboardEvent>): dojo.NodeList; delegate(selector: string, eventName: "load", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "loadeddata", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "loadedmetadata", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "loadstart", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "message", listener: Dojo.EventListener<MessageEvent>): dojo.NodeList; delegate(selector: string, eventName: "mousedown", listener: Dojo.EventListener<MouseEvent>): dojo.NodeList; delegate(selector: string, eventName: "mousemove", listener: Dojo.EventListener<MouseEvent>): dojo.NodeList; delegate(selector: string, eventName: "mouseout", listener: Dojo.EventListener<MouseEvent>): dojo.NodeList; delegate(selector: string, eventName: "mouseover", listener: Dojo.EventListener<MouseEvent>): dojo.NodeList; delegate(selector: string, eventName: "mouseup", listener: Dojo.EventListener<MouseEvent>): dojo.NodeList; delegate(selector: string, eventName: "mousewheel", listener: Dojo.EventListener<MouseWheelEvent>): dojo.NodeList; delegate(selector: string, eventName: "offline", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "online", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "pause", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "play", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "playing", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "progress", listener: (ev: any) => void): dojo.NodeList; delegate(selector: string, eventName: "ratechange", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "readystatechange", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "reset", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "resize", listener: Dojo.EventListener<UIEvent>): dojo.NodeList; delegate(selector: string, eventName: "scroll", listener: Dojo.EventListener<UIEvent>): dojo.NodeList; delegate(selector: string, eventName: "seeked", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "seeking", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "select", listener: Dojo.EventListener<UIEvent>): dojo.NodeList; delegate(selector: string, eventName: "stalled", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "storage", listener: Dojo.EventListener<StorageEvent>): dojo.NodeList; 
delegate(selector: string, eventName: "submit", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "suspend", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "timeupdate", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "unload", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "volumechange", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: "waiting", listener: EventListener): dojo.NodeList; delegate(selector: string, eventName: string, listener: EventListener): dojo.NodeList; dtl(template: string, thisObject?: Object): dojo.NodeList; empty(): dojo.NodeList; end(): dojo.NodeList; even(): dojo.NodeList; every(callback: (node: HTMLElement, index: number, list: dojo.NodeList) => boolean, thisObject?: Object): boolean; fadeIn(args?: Dojo.Fx.AutoBaseCreateOptions | Dojo.Fx.BaseCreateOptions): dojo.NodeList; fadeOut(args?: Dojo.Fx.AutoBaseCreateOptions | Dojo.Fx.BaseCreateOptions): dojo.NodeList; filter(filter: string | ((item: HTMLElement, index: number, list: dojo.NodeList) => boolean)): dojo.NodeList; first(): dojo.NodeList; forEach(callback: (item: HTMLElement, index: number, list: dojo.NodeList) => void, thisObject?: Object): dojo.NodeList; html(): string; html(content: string | NodesLike): dojo.NodeList; indexOf(value: HTMLElement, fromIndex?: number): number; innerHTML(): string; innerHTML(content: string | NodesLike): dojo.NodeList; insertAfter(query: string): dojo.NodeList; insertBefore(query: string): dojo.NodeList; instantiate(declaredClass: string, properties?: Dojo.PropertiesMap): dojo.NodeList; last(): dojo.NodeList; lastIndexOf(value: HTMLElement, fromIndex?: number): number; map(func: (item: HTMLElement, index: number, list: dojo.NodeList) => HTMLElement, thisObject?: Object): dojo.NodeList; marginBox(): Position; next(query?: string): HTMLElement; nextAll(query?: string): dojo.NodeList; odd(): dojo.NodeList; on(name: "abort", listener: Dojo.EventListener<UIEvent>): dojo.NodeList; on(name: "afterprint", listener: EventListener): dojo.NodeList; on(name: "beforeprint", listener: EventListener): dojo.NodeList; on(name: "beforeunload", listener: Dojo.EventListener<BeforeUnloadEvent>): dojo.NodeList; on(name: "blur", listener: Dojo.EventListener<FocusEvent>): dojo.NodeList; on(name: "canplay", listener: EventListener): dojo.NodeList; on(name: "canplaythrough", listener: EventListener): dojo.NodeList; on(name: "change", listener: EventListener): dojo.NodeList; on(name: "click", listener: Dojo.EventListener<MouseEvent>): dojo.NodeList; on(name: "contextmenu", listener: Dojo.EventListener<MouseEvent>): dojo.NodeList; on(name: "dblclick", listener: Dojo.EventListener<MouseEvent>): dojo.NodeList; on(name: "drag", listener: Dojo.EventListener<DragEvent>): dojo.NodeList; on(name: "dragend", listener: Dojo.EventListener<DragEvent>): dojo.NodeList; on(name: "dragenter", listener: Dojo.EventListener<DragEvent>): dojo.NodeList; on(name: "dragleave", listener: Dojo.EventListener<DragEvent>): dojo.NodeList; on(name: "dragover", listener: Dojo.EventListener<DragEvent>): dojo.NodeList; on(name: "dragstart", listener: Dojo.EventListener<DragEvent>): dojo.NodeList; on(name: "drop", listener: Dojo.EventListener<DragEvent>): dojo.NodeList; on(name: "durationchange", listener: EventListener): dojo.NodeList; on(name: "emptied", listener: EventListener): dojo.NodeList; on(name: "ended", listener: EventListener): dojo.NodeList; on(name: "focus", listener: 
Dojo.EventListener<FocusEvent>): dojo.NodeList; on(name: "hashchange", listener: EventListener): dojo.NodeList; on(name: "input", listener: EventListener): dojo.NodeList; on(name: "keydown", listener: Dojo.EventListener<KeyboardEvent>): dojo.NodeList; on(name: "keypress", listener: Dojo.EventListener<KeyboardEvent>): dojo.NodeList; on(name: "keyup", listener: Dojo.EventListener<KeyboardEvent>): dojo.NodeList; on(name: "load", listener: EventListener): dojo.NodeList; on(name: "loadeddata", listener: EventListener): dojo.NodeList; on(name: "loadedmetadata", listener: EventListener): dojo.NodeList; on(name: "loadstart", listener: EventListener): dojo.NodeList; on(name: "message", listener: Dojo.EventListener<MessageEvent>): dojo.NodeList; on(name: "mousedown", listener: Dojo.EventListener<MouseEvent>): dojo.NodeList; on(name: "mousemove", listener: Dojo.EventListener<MouseEvent>): dojo.NodeList; on(name: "mouseout", listener: Dojo.EventListener<MouseEvent>): dojo.NodeList; on(name: "mouseover", listener: Dojo.EventListener<MouseEvent>): dojo.NodeList; on(name: "mouseup", listener: Dojo.EventListener<MouseEvent>): dojo.NodeList; on(name: "mousewheel", listener: Dojo.EventListener<MouseWheelEvent>): dojo.NodeList; on(name: "offline", listener: EventListener): dojo.NodeList; on(name: "online", listener: EventListener): dojo.NodeList; on(name: "pause", listener: EventListener): dojo.NodeList; on(name: "play", listener: EventListener): dojo.NodeList; on(name: "playing", listener: EventListener): dojo.NodeList; on(name: "progress", listener: (ev: any) => void): dojo.NodeList; on(name: "ratechange", listener: EventListener): dojo.NodeList; on(name: "readystatechange", listener: EventListener): dojo.NodeList; on(name: "reset", listener: EventListener): dojo.NodeList; on(name: "resize", listener: Dojo.EventListener<UIEvent>): dojo.NodeList; on(name: "scroll", listener: Dojo.EventListener<UIEvent>): dojo.NodeList; on(name: "seeked", listener: EventListener): dojo.NodeList; on(name: "seeking", listener: EventListener): dojo.NodeList; on(name: "select", listener: Dojo.EventListener<UIEvent>): dojo.NodeList; on(name: "stalled", listener: EventListener): dojo.NodeList; on(name: "storage", listener: Dojo.EventListener<StorageEvent>): dojo.NodeList; on(name: "submit", listener: EventListener): dojo.NodeList; on(name: "suspend", listener: EventListener): dojo.NodeList; on(name: "timeupdate", listener: EventListener): dojo.NodeList; on(name: "unload", listener: EventListener): dojo.NodeList; on(name: "volumechange", listener: EventListener): dojo.NodeList; on(name: "waiting", listener: EventListener): dojo.NodeList; on(name: string, listener: EventListener): dojo.NodeList; on(type: Dojo.ExtensionEvent, listener: Dojo.Action): dojo.NodeList; orphan(filter?: string): dojo.NodeList; parent(query?: string): dojo.NodeList; parents(query?: string): dojo.NodeList; place(query: string | HTMLElement, position?: string | number): dojo.NodeList; position(): Dojo.Rectangle; prepend(content: string | Object | NodesLike): dojo.NodeList; prependTo(query: string): dojo.NodeList; prev(query?: string): HTMLElement; prevAll(query?: string): dojo.NodeList; query(query: string): dojo.NodeList; remove(filter?: string): dojo.NodeList; removeAttr(name: string): dojo.NodeList; removeClass(classNames?: string | string[]): dojo.NodeList; removeClassFx(classNames: string | string[], args?: Dojo.Fx.CreateOptions): Animation; removeData(key?: string): dojo.NodeList; replaceAll(query: string): dojo.NodeList; replaceClass(addClasses: string 
| string[], removeClasses?: string | string[]): dojo.NodeList;
        replaceWith(content: string | Object | NodesLike): dojo.NodeList;
        siblings(query?: string): dojo.NodeList;
        slice(begin: number, end?: number): dojo.NodeList;
        slideTo(args: Dojo.Fx.AutoSlideCreateOptions | Dojo.Fx.SlideCreateOptions): Animation;
        some(callback: (node: HTMLElement, index: number, list: dojo.NodeList) => boolean, thisObject?: Object): boolean;
        splice(index: number, howmany?: number, ...items: HTMLElement[]): dojo.NodeList;
        style(property: string): string[];
        style(property: string, value: string): dojo.NodeList;
        text(): string;
        text(value: string): dojo.NodeList;
        toggleClass(className: string, condition?: boolean): dojo.NodeList;
        toggleClassFx(className: string, condition?: boolean, options?: Dojo.Fx.CreateOptions): Animation;
        val(): string;
        val(values: string | string[]): dojo.NodeList;
        wipeIn(args: Dojo.Fx.AutoCreateOptions | Dojo.Fx.CreateOptions): dojo.NodeList;
        wipeOut(args: Dojo.Fx.AutoCreateOptions | Dojo.Fx.CreateOptions): dojo.NodeList;
        wrap(html: string | HTMLElement): dojo.NodeList;
        wrapAll(html: string | HTMLElement): dojo.NodeList;
        wrapInner(html: string | HTMLElement): dojo.NodeList;
    }
}
declare module "dojo/NodeList" { var NodeList: typeof dojo.NodeList; export = NodeList; }
declare function require(module: "dojo/NodeList"): dojo.NodeList;

// dojo/NodeList-???
declare module "dojo/NodeList-data" { var NodeList: typeof dojo.NodeList; export = NodeList; }
declare function require(module: "dojo/NodeList-data"): dojo.NodeList;
declare module "dojo/NodeList-dom" { var NodeList: typeof dojo.NodeList; export = NodeList; }
declare function require(module: "dojo/NodeList-dom"): dojo.NodeList;
declare module "dojo/NodeList-fx" { var NodeList: typeof dojo.NodeList; export = NodeList; }
declare function require(module: "dojo/NodeList-fx"): dojo.NodeList;
declare module "dojo/NodeList-html" { var NodeList: typeof dojo.NodeList; export = NodeList; }
declare function require(module: "dojo/NodeList-html"): dojo.NodeList;
declare module "dojo/NodeList-manipulate" { var NodeList: typeof dojo.NodeList; export = NodeList; }
declare function require(module: "dojo/NodeList-manipulate"): dojo.NodeList;
declare module "dojo/NodeList-traverse" { var NodeList: typeof dojo.NodeList; export = NodeList; }
declare function require(module: "dojo/NodeList-traverse"): dojo.NodeList;

// dojo/number
declare module Dojo {
    interface Number {
        format(value: number, options?: { pattern?: string; type?: string; locale?: string; round?: number; fractional?: boolean; places?: number; }): string;
        parse(expression: string, options?: { pattern?: string; type?: string; locale?: string; strict?: boolean; fractional?: boolean; }): number;
        regexp(options?: { pattern?: string; type?: string; locale?: string; strict?: boolean; places?: number; }): RegExp;
        round(value: number, places?: number, increment?: number): number;
    }
}
declare module "dojo/number" { var num: Dojo.Number; export = num; }
declare function require(module: "dojo/number"): Dojo.Number;

// dojo/on
declare module Dojo {
    interface On {
        (target: HTMLElement, type: "abort", listener: EventListener<UIEvent>, dontFix?: boolean): Dojo.RemovableHandle;
        (target: HTMLElement, type: "afterprint", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle;
        (target: HTMLElement, type: "beforeprint", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle;
        (target: HTMLElement, type: "beforeunload", listener: EventListener<BeforeUnloadEvent>, dontFix?: boolean):
Dojo.RemovableHandle; (target: HTMLElement, type: "blur", listener: EventListener<FocusEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "canplay", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "canplaythrough", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "change", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "click", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "contextmenu", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "dblclick", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "drag", listener: EventListener<DragEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "dragend", listener: EventListener<DragEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "dragenter", listener: EventListener<DragEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "dragleave", listener: EventListener<DragEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "dragover", listener: EventListener<DragEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "dragstart", listener: EventListener<DragEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "drop", listener: EventListener<DragEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "durationchange", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "emptied", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "ended", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "focus", listener: EventListener<FocusEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "hashchange", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "input", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "keydown", listener: EventListener<KeyboardEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "keypress", listener: EventListener<KeyboardEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "keyup", listener: EventListener<KeyboardEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "load", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "loadeddata", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "loadedmetadata", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "loadstart", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "message", listener: EventListener<MessageEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "mousedown", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "mousemove", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, 
type: "mouseout", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "mouseover", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "mouseup", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "mousewheel", listener: EventListener<MouseWheelEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "offline", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "online", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "pause", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "play", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "playing", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "progress", listener: (ev: any) => void, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "ratechange", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "readystatechange", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "reset", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "resize", listener: EventListener<UIEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "scroll", listener: EventListener<UIEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "seeked", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "seeking", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "select", listener: EventListener<UIEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "stalled", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "storage", listener: EventListener<StorageEvent>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "submit", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "suspend", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "timeupdate", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "unload", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "volumechange", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: "waiting", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: string, listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: Object, type: string, listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; (target: HTMLElement, type: Dojo.ExtensionEvent, listener: Dojo.Action, dontFix?: boolean): Dojo.RemovableHandle; (target: Object, type: Dojo.ExtensionEvent, listener: Dojo.Action, dontFix?: boolean): Dojo.RemovableHandle; emit(target: Object, type: string | Dojo.ExtensionEvent, event: { bubbles?: boolean; cancelable?: boolean; }): void; selector(cssSelector: string, 
event: string | Dojo.ExtensionEvent, children?: boolean): Dojo.ExtensionEvent; pausable(target: HTMLElement, type: "abort", listener: EventListener<UIEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "afterprint", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "beforeprint", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "beforeunload", listener: EventListener<BeforeUnloadEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "blur", listener: EventListener<FocusEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "canplay", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "canplaythrough", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "change", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "click", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "contextmenu", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "dblclick", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "drag", listener: EventListener<DragEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "dragend", listener: EventListener<DragEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "dragenter", listener: EventListener<DragEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "dragleave", listener: EventListener<DragEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "dragover", listener: EventListener<DragEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "dragstart", listener: EventListener<DragEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "drop", listener: EventListener<DragEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "durationchange", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "emptied", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "ended", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "focus", listener: EventListener<FocusEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "hashchange", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "input", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "keydown", listener: EventListener<KeyboardEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "keypress", listener: EventListener<KeyboardEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "keyup", listener: EventListener<KeyboardEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "load", listener: EventListener<Event>, dontFix?: boolean): 
Dojo.RemovableHandle; pausable(target: HTMLElement, type: "loadeddata", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "loadedmetadata", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "loadstart", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "message", listener: EventListener<MessageEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "mousedown", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "mousemove", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "mouseout", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "mouseover", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "mouseup", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "mousewheel", listener: EventListener<MouseWheelEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "offline", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "online", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "pause", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "play", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "playing", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "progress", listener: (ev: any) => void, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "ratechange", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "readystatechange", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "reset", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "resize", listener: EventListener<UIEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "scroll", listener: EventListener<UIEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "seeked", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "seeking", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "select", listener: EventListener<UIEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "stalled", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "storage", listener: EventListener<StorageEvent>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "submit", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "suspend", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "timeupdate", listener: 
EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "unload", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "volumechange", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: "waiting", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement, type: string, listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; pausable(target: HTMLElement | Object, type: string | Dojo.ExtensionEvent, listener: Dojo.Action, dontFix?: boolean): Dojo.PausableHandle; once(target: HTMLElement, type: "afterprint", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "beforeprint", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "beforeunload", listener: EventListener<BeforeUnloadEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "blur", listener: EventListener<FocusEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "canplay", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "canplaythrough", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "change", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "click", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "contextmenu", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "dblclick", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "drag", listener: EventListener<DragEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "dragend", listener: EventListener<DragEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "dragenter", listener: EventListener<DragEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "dragleave", listener: EventListener<DragEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "dragover", listener: EventListener<DragEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "dragstart", listener: EventListener<DragEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "drop", listener: EventListener<DragEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "durationchange", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "emptied", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "ended", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "focus", listener: EventListener<FocusEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "hashchange", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "input", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "keydown", listener: EventListener<KeyboardEvent>, 
dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "keypress", listener: EventListener<KeyboardEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "keyup", listener: EventListener<KeyboardEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "load", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "loadeddata", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "loadedmetadata", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "loadstart", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "message", listener: EventListener<MessageEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "mousedown", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "mousemove", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "mouseout", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "mouseover", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "mouseup", listener: EventListener<MouseEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "mousewheel", listener: EventListener<MouseWheelEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "offline", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "online", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "pause", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "play", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "playing", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "progress", listener: (ev: any) => void, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "ratechange", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "readystatechange", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "reset", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "resize", listener: EventListener<UIEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "scroll", listener: EventListener<UIEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "seeked", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "seeking", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "select", listener: EventListener<UIEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "stalled", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "storage", listener: EventListener<StorageEvent>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: 
"submit", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "suspend", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "timeupdate", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "unload", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "volumechange", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: "waiting", listener: EventListener<Event>, dontFix?: boolean): Dojo.RemovableHandle; once(target: HTMLElement, type: string, listener: EventListener<Event>, dontFix?: boolean): void; once(target: HTMLElement | Object, type: string | Dojo.ExtensionEvent, listener: Dojo.Action, dontFix?: boolean): void; } } declare module "dojo/on" { var on: Dojo.On; export = on; } declare function require(module: "dojo/on"): Dojo.On; // dojo/parser interface _ArrayOrPromise<T> extends Array<T>, Dojo.PromiseLike<T> { } declare module Dojo { interface _ParseOptions { noStart?: boolean; rootNode?: HTMLElement; template?: boolean; inherited?: Object; scope?: string; propsThis?: Object; contextRequire?: Function; } interface Parser { parse(rootNode?: HTMLElement, options?: _ParseOptions): _ArrayOrPromise<dijit._WidgetBase>; scan(root?: HTMLElement, options?: _ParseOptions): dojo.Promise<HTMLElement[]>; instantiate(nodes: HTMLElement[], mixin?: PropertiesMap, options?: _ParseOptions): dijit._WidgetBase[]; construct(ctor: { (params?: Dijit.WidgetCreateOptions): dijit._WidgetBase; }, node: HTMLElement, mixin?: PropertiesMap, options?: _ParseOptions, scripts?: HTMLElement[], inherited?: Object): dijit._WidgetBase; } } declare module "dojo/parser" { var parser: Dojo.Parser; export = parser; } declare function require(module: "dojo/parser"): Dojo.Parser; // dojo/promise/Promise declare module "dojo/promise/Promise" { var Promise: typeof dojo.Promise; export = Promise; } declare function require<T>(module: "dojo/promise/promise"): dojo.Promise<T>; // dojo/promise/all declare module Dojo { module Promise { interface All { <T>(promises: T[] | dojo.Promise<T>[]): dojo.Promise<T[]>; <T>(promises: Dojo.Dictionary<dojo.Promise<T>>): dojo.Promise<Dojo.Dictionary<T>>; <T>(promises: T): dojo.Promise<T>; } } } declare module "dojo/promise/all" { var all: Dojo.Promise.All; export = all; } declare function require(module: "dojo/promise/all"): Dojo.Promise.All; // dojo/promise/first declare module Dojo { module Promise { interface First { <T>(promises: T[] | dojo.Promise<T>[] | Dojo.Dictionary<dojo.Promise<T>>): dojo.Promise<T>; <T>(promises: T): dojo.Promise<T>; } } } declare module "dojo/promise/first" { var first: Dojo.Promise.First; export = first; } declare function require(module: "dojo/promise/first"): Dojo.Promise.First; // dojo/query declare module Dojo { interface Query { (selector: string, context?: string | Object): dojo.NodeList; NodeList: dojo.NodeList; } } declare module "dojo/query" { var query: Dojo.Query; export = query; } declare function require(module: "dojo/query"): Dojo.Query; declare module "dojo/query!css2" { var query: Dojo.Query; export = query; } declare function require(module: "dojo/query!css2"): Dojo.Query; declare module "dojo/query!lite" { var query: Dojo.Query; export = query; } declare function require(module: "dojo/query!lite"): Dojo.Query; declare module "dojo/query!css2.1" { var query: Dojo.Query; export = query; } 
declare function require(module: "dojo/query!css2.1"): Dojo.Query; declare module "dojo/query!css3" { var query: Dojo.Query; export = query; } declare function require(module: "dojo/query!css3"): Dojo.Query; declare module "dojo/query!acme" { var query: Dojo.Query; export = query; } declare function require(module: "dojo/query!acme"): Dojo.Query; // dojo/regexp declare module Dojo { interface RegExp { buildGroupRE(arr: any | any[], re: (item: any) => RegExp, nonCapture?: boolean): RegExp; escapeString(str: string, except?: string): string; group(expression: string, nonCapture?: boolean): string; } } declare module "dojo/regexp" { var regexp: Dojo.RegExp; export = regexp; } declare function require(module: "dojo/regexp"): Dojo.RegExp; // dojo/request declare module Dojo { module Request { interface MethodOptions { method: string; } interface BaseOptions { handleAs?: string; headers?: { [header: string]: string; }; sync?: boolean; data?: any; query?: any; timeout?: number; preventCache?: boolean; content?: Object; } interface Options extends BaseOptions, MethodOptions { } interface RequestObject<T, BaseOptions, Options> { <T>(url: string, options?: Options): dojo.Promise<T>; get<T>(url: string, options?: BaseOptions): dojo.Promise<T>; put<T>(url: string, options?: BaseOptions): dojo.Promise<T>; post<T>(url: string, options?: BaseOptions): dojo.Promise<T>; del<T>(url: string, options?: BaseOptions): dojo.Promise<T>; } } } declare module Dojo { module Request { interface Base { <T>(url: string, options?: Options): dojo.Promise<T>; get<T>(url: string, options?: BaseOptions): dojo.Promise<T>; put<T>(url: string, options?: BaseOptions): dojo.Promise<T>; post<T>(url: string, options?: BaseOptions): dojo.Promise<T>; del<T>(url: string, options?: BaseOptions): dojo.Promise<T>; } } } declare module "dojo/request" { var request: Dojo.Request.Base; export = request; } declare function require(module: "dojo/request"): Dojo.Request.Base; declare module Dojo { module Request { module Xhr { interface Error { status: number; responseText: string; } interface BaseOptions extends Request.BaseOptions { } interface Options extends BaseOptions, MethodOptions { } interface Base { <T>(url: string, options?: Options): dojo._PromiseBase<T, Error>; get<T>(url: string, options?: BaseOptions): dojo._PromiseBase<T, Error>; put<T>(url: string, options?: BaseOptions): dojo._PromiseBase<T, Error>; post<T>(url: string, options?: BaseOptions): dojo._PromiseBase<T, Error>; del<T>(url: string, options?: BaseOptions): dojo._PromiseBase<T, Error>; } } } } declare module "dojo/request/xhr" { var request: Dojo.Request.Xhr.Base; export = request; } declare function require(module: "dojo/request/xhr"): Dojo.Request.Xhr.Base; declare module Dojo { module Request { module Node { interface BaseOptions extends Request.BaseOptions { user: string; password: string; } interface Options extends BaseOptions, MethodOptions { } interface Base { <T>(url: string, options?: Options): dojo.Promise<T>; get<T>(url: string, options?: BaseOptions): dojo.Promise<T>; put<T>(url: string, options?: BaseOptions): dojo.Promise<T>; post<T>(url: string, options?: BaseOptions): dojo.Promise<T>; del<T>(url: string, options?: BaseOptions): dojo.Promise<T>; } } } } declare module "dojo/request/node" { var request: Dojo.Request.Node.Base; export = request; } declare function require(module: "dojo/request/node"): Dojo.Request.Node.Base; declare module Dojo { module Request { module iFrame { interface BaseOptions extends Request.BaseOptions { form?: HTMLElement; } 
interface Options extends BaseOptions, MethodOptions { } interface Base { <T>(url: string, options?: Options): dojo.Promise<T>; get<T>(url: string, options?: BaseOptions): dojo.Promise<T>; post<T>(url: string, options?: BaseOptions): dojo.Promise<T>; } } } } declare module "dojo/request/iframe" { var request: Dojo.Request.iFrame.Base; export = request; } declare function require(module: "dojo/request/iframe"): Dojo.Request.iFrame.Base; declare module Dojo { module Request { module Script { interface BaseOptions extends Request.BaseOptions { frameDoc?: HTMLDocument; jsonp?: string; checkString?: string; } interface Options extends BaseOptions, MethodOptions { } interface Base { <T>(url: string, options?: Options): dojo.Promise<T>; get<T>(url: string, options?: BaseOptions): dojo.Promise<T>; post<T>(url: string, options?: BaseOptions): dojo.Promise<T>; } } } } declare module "dojo/request/script" { var request: Dojo.Request.Script.Base; export = request; } declare function require(module: "dojo/request/script"): Dojo.Request.Script.Base; declare module Dojo { module Request { interface Handlers { register(name: string, handler: (response: any) => any): void; } } } declare module "dojo/request/handlers" { var handlers: Dojo.Request.Handlers; export = handlers; } declare function require(module: "dojo/request/handlers"): Dojo.Request.Handlers; declare module Dojo { module Request { interface Notify { notify(type?: "start", listener?: SimpleAction): RemovableHandle; notify(type?: "send", listener?: (response: any, cancel: () => void) => void): RemovableHandle; notify(type?: "load", listener?: (response: any) => void): RemovableHandle; notify(type?: "error", listener?: (error: any) => void): RemovableHandle; notify(type?: "done", listener?: (responseOrError: any) => void): RemovableHandle; notify(type?: "stop", listener?: SimpleAction): RemovableHandle; notify(type?: string, listener?: Action): RemovableHandle; } } } declare module "dojo/request/notify" { var notify: Dojo.Request.Notify; export = notify; } declare function require(module: "dojo/request/notify"): Dojo.Request.Notify; declare module Dojo { module Request { interface Registry { <T>(url: string, options?: Options): dojo.Promise<T>; get<T>(url: string, options?: BaseOptions): dojo.Promise<T>; put<T>(url: string, options?: BaseOptions): dojo.Promise<T>; post<T>(url: string, options?: BaseOptions): dojo.Promise<T>; del<T>(url: string, options?: BaseOptions): dojo.Promise<T>; register<T>(url: string | RegExp | ((url: string, options: Options) => boolean), provider: RequestObject<T, BaseOptions, Options>, first?: boolean): RemovableHandle; } } } declare module "dojo/request/registry" { var registry: Dojo.Request.Registry; export = registry; } declare function require(module: "dojo/request/registry"): Dojo.Request.Registry; // dojo/router declare module Dojo { interface RouterEvent { params: Dojo.Dictionary<string>; oldPath: string; newPath: string; preventDefault: Dojo.SimpleAction; stopImmediatePropagation: Dojo.SimpleAction; } interface Router { register(route: string | RegExp, callback: EventListener<RouterEvent>): RemovableHandle; registerBefore(route: string | RegExp, callback: EventListener<RouterEvent>): RemovableHandle; startup(defaultPath?: string): void; destroy(): void; go(path: string, replace?: boolean): boolean; } } declare module "dojo/router" { var router: Dojo.Router; export = router; } declare function require(module: "dojo/router"): Dojo.Router; // dojo/sniff declare module Dojo { interface Sniff extends Has { } } 
declare module "dojo/sniff" { var sniff: Dojo.Sniff; export = sniff; } declare function require(module: "dojo/sniff"): Dojo.Sniff; // dojo/text declare module "dojo/text" { } declare function require(module: "dojo/text"): void; // dojo/topic declare module Dojo { interface Topic { subscribe(topic: string, listener: Action): RemovableHandle; publish(topic: string, ...v_args: any[]): void; } } declare module "dojo/topic" { var topic: Dojo.Topic; export = topic; } declare function require(module: "dojo/topic"): Dojo.Topic; // dojo/Stateful declare module "dojo/Stateful" { var Stateful: typeof dojo.Stateful; export = Stateful; } declare function require(module: "dojo/Stateful"): dojo.Stateful; // dojo/string declare module Dojo { interface String { pad(text: string, size: number, ch?: string, end?: boolean): string; rep(str: string, num: number): string; substitute(template: string, map: string[] | { [text: string]: string; }, transform?: (value: any, key: string) => string, thisObject?: Object): string; trim(str: string): string; } } declare module "dojo/string" { var str: Dojo.String; export = str; } declare function require(module: "dojo/string"): Dojo.String; // dojo/touch declare module Dojo { interface Touch { cancel(node: HTMLElement, listener: (ev: MouseEvent) => boolean): RemovableHandle; enter(node: HTMLElement, listener: (ev: MouseEvent) => boolean): RemovableHandle; leave(node: HTMLElement, listener: (ev: MouseEvent) => boolean): RemovableHandle; move(node: HTMLElement, listener: (ev: MouseEvent) => boolean): RemovableHandle; out(node: HTMLElement, listener: (ev: MouseEvent) => boolean): RemovableHandle; over(node: HTMLElement, listener: (ev: MouseEvent) => boolean): RemovableHandle; press(node: HTMLElement, listener: (ev: MouseEvent) => boolean): RemovableHandle; release(node: HTMLElement, listener: (ev: MouseEvent) => boolean): RemovableHandle; } } declare module "dojo/touch" { var touch: Dojo.Touch; export = touch; } declare function require(module: "dojo/touch"): Dojo.Touch; // dojo/uacss declare module Dojo { interface Uacss extends Has { } } declare module "dojo/uacss" { var uacss: Dojo.Uacss; export = uacss; } declare function require(module: "dojo/uacss"): Dojo.Uacss; // dojo/when declare module Dojo { interface When { <T>(value: T, callback?: (value: T) => any, errback?: (error: any) => void, progback?: (update: any) => void): void; } } declare module "dojo/when" { var when: Dojo.When; export = when; } declare function require(module: "dojo/when"): Dojo.When; // dojo/window declare module Dojo { interface Window { get(doc: HTMLDocument): Window; getBox(doc: HTMLDocument): Position; scrollIntoView(node: HTMLElement, pos?: Object): void; } } declare module "dojo/window" { var window: Dojo.Window; export = window; } declare function require(module: "dojo/window"): Dojo.Window;<|fim▁end|>
{ var supplemental: Dojo.Cldr.Supplemental; export = supplemental;
<|file_name|>test_util.py<|end_file_name|><|fim▁begin|>import unittest import numpy as np from bayesnet.image.util import img2patch, patch2img class TestImg2Patch(unittest.TestCase): def test_img2patch(self): img = np.arange(16).reshape(1, 4, 4, 1) patch = img2patch(img, size=3, step=1) expected = np.asarray([ [img[0, 0:3, 0:3, 0], img[0, 0:3, 1:4, 0]], [img[0, 1:4, 0:3, 0], img[0, 1:4, 1:4, 0]] ]) expected = expected[None, ..., None] self.assertTrue((patch == expected).all()) imgs = [ np.random.randn(2, 5, 6, 3), np.random.randn(3, 10, 10, 2), np.random.randn(1, 23, 17, 5) ] sizes = [ (1, 1), 2, (3, 4) ] steps = [ (1, 2), (3, 1), 3 ] shapes = [ (2, 5, 3, 1, 1, 3), (3, 3, 9, 2, 2, 2), (1, 7, 5, 3, 4, 5) ] for img, size, step, shape in zip(imgs, sizes, steps, shapes): self.assertEqual(shape, img2patch(img, size, step).shape) class TestPatch2Img(unittest.TestCase): def test_patch2img(self): img = np.arange(16).reshape(1, 4, 4, 1) patch = img2patch(img, size=2, step=2) self.assertTrue((img == patch2img(patch, (2, 2), (1, 4, 4, 1))).all()) patch = img2patch(img, size=3, step=1) expected = np.arange(0, 32, 2).reshape(1, 4, 4, 1) expected[0, 0, 0, 0] /= 2 expected[0, 0, -1, 0] /= 2 expected[0, -1, 0, 0] /= 2 expected[0, -1, -1, 0] /= 2 expected[0, 1:3, 1:3, 0] *= 2 self.assertTrue((expected == patch2img(patch, (1, 1), (1, 4, 4, 1))).all()) <|fim▁hole|><|fim▁end|>
if __name__ == '__main__': unittest.main()
<|file_name|>widgets.py<|end_file_name|><|fim▁begin|>from django import forms from django.core.urlresolvers import reverse from django.forms.widgets import RadioFieldRenderer from django.utils.encoding import force_text from django.utils.html import format_html from django.utils.safestring import mark_safe class BootstrapChoiceFieldRenderer(RadioFieldRenderer): """ An object used by RadioSelect to enable customization of radio widgets. """ def render(self): """ Outputs a <div> for this set of choice fields. If an id was given to the field, it is applied to the <div> (each item in the list will get an id of `$id_$i`). """ id_ = self.attrs.get('id', None) start_tag = format_html('<div id="{0}">', id_) if id_ else '<div>' output = [start_tag] for widget in self: output.append(format_html('<div class="radio">{0}</div>', force_text(widget))) output.append('</div>') return mark_safe('\n'.join(output)) class UseCustomRegWidget(forms.MultiWidget): """ This widget is for three fields on event add/edit under Registration: * use_custom_reg_form * reg_form * bind_reg_form_to_conf_only """ def __init__(self, attrs=None, reg_form_choices=None, event_id=None): self.attrs = attrs self.reg_form_choices = reg_form_choices self.event_id = event_id if not self.attrs: self.attrs = {'id': 'use_custom_reg'} self.widgets = ( forms.CheckboxInput(), forms.Select(attrs={'class': 'form-control'}), forms.RadioSelect(renderer=BootstrapChoiceFieldRenderer) ) super(UseCustomRegWidget, self).__init__(self.widgets, attrs) def render(self, name, value, attrs=None): if not isinstance(value, list): value = self.decompress(value) final_attrs = self.build_attrs(attrs) id_ = final_attrs.get('id', None) use_custom_reg_form_widget = self.widgets[0] rendered_use_custom_reg_form = self.render_widget( use_custom_reg_form_widget, name, value, final_attrs, 0, id_ ) reg_form_widget = self.widgets[1] reg_form_widget.choices = self.reg_form_choices #reg_form_widget.attrs = {'size':'8'} rendered_reg_form = self.render_widget( reg_form_widget, name, value, final_attrs, 1, id_ ) bind_reg_form_to_conf_only_widget = self.widgets[2] choices = ( ('1', mark_safe('Use one form for all pricings %s' % rendered_reg_form)), ) bind_reg_form_to_conf_only_widget.choices = choices rendered_bind_reg_form_to_conf_only = self.render_widget( bind_reg_form_to_conf_only_widget, name, value, final_attrs, 2, id_ ) rendered_bind_reg_form_to_conf_only = rendered_bind_reg_form_to_conf_only.replace( '%s</label>' % rendered_reg_form, "</label>%s" % rendered_reg_form ) <|fim▁hole|> <div> <a href="%s" target="_blank">Manage Custom Registration Form</a> </div> """ % reverse('event.event_custom_reg_form_list', args=[self.event_id]) else: manage_custom_reg_link = '' output_html = """ <div id="t-events-use-customreg-box"> <div id="t-events-use-customreg-checkbox" class="checkbox"> <label for="id_%s_%s">%s Use Custom Registration Form</label> </div> <div id="t-events-one-or-separate-form">%s</div> %s </div> """ % ( name, '0', rendered_use_custom_reg_form, rendered_bind_reg_form_to_conf_only, manage_custom_reg_link ) return mark_safe(output_html) def render_widget(self, widget, name, value, attrs, index=0, id=None): i = index id_ = id if value: try: widget_value = value[i] except IndexError: self.fields['use_reg_form'].initial = None else: widget_value = None if id_: final_attrs = dict(attrs, id='%s_%s' % (id_, i)) if widget.__class__.__name__.lower() != 'select': classes = final_attrs.get('class', None) if classes: classes = classes.split(' ') classes.remove('form-control')
classes = ' '.join(classes) final_attrs['class'] = classes return widget.render(name+'_%s' %i, widget_value, final_attrs) def decompress(self, value): if value: data_list = value.split(',') if data_list[0] == '1': data_list[0] = 'on' return data_list return None<|fim▁end|>
if self.event_id: manage_custom_reg_link = """
<|file_name|>sv.js<|end_file_name|><|fim▁begin|>/* Copyright (c) 2003-2017, CKSource - Frederico Knabben. All rights reserved. For licensing, see LICENSE.md or http://ckeditor.com/license */<|fim▁hole|>} );<|fim▁end|>
CKEDITOR.plugins.setLang( 'preview', 'sv', { preview: 'Förhandsgranska'
<|file_name|>multi_regular.cpp<|end_file_name|><|fim▁begin|>// Copyright (c) 2005-2009 INRIA Sophia-Antipolis (France). // All rights reserved. // // This file is part of CGAL (www.cgal.org); you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public License as // published by the Free Software Foundation; either version 3 of the License, // or (at your option) any later version. // // Licensees holding a valid commercial license may use this file in // accordance with the commercial license agreement provided with the software. // // This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE // WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. // // $URL$ // $Id$ // // // Author(s) : Sebastien Loriot, Sylvain Pion #include <CGAL/Exact_predicates_inexact_constructions_kernel.h> #include <CGAL/CGAL_Ipelet_base.h> #include <CGAL/Regular_triangulation_euclidean_traits_2.h> #include <CGAL/Regular_triangulation_2.h> #include "include/CGAL_ipelets/k_delaunay.h" namespace CGAL_multi_regular{ typedef CGAL::Exact_predicates_inexact_constructions_kernel Kernel; typedef Kernel::FT FT; typedef CGAL::Regular_triangulation_euclidean_traits_2<Kernel,FT> Gt; typedef CGAL::Regular_triangulation_2<Gt> Regular; typedef Regular::Finite_edges_iterator itEdge; // -------------------------------------------------------------------- const std::string sublabel[] = { "Regular", "Regular 2", "Regular 3","Regular n-1", "Regular k", "Power Diagram", "Power Diagram 2", "Power Diagram 3", "Power Diagram n-1", "Power Diagram k", "Help" }; const std::string hlpmsg[] = { "Generate k-th regular triangulation and k-th dual Power diagram. Note : k must be smaller than the number of input circles." }; class MregularIpelet : public CGAL::Ipelet_base<Kernel,11> { public: MregularIpelet() : CGAL::Ipelet_base<Kernel,11>("k-order Regular",sublabel,hlpmsg){} void protected_run(int); }; // -------------------------------------------------------------------- // -------------------------------------------------------------------- void MregularIpelet::protected_run(int fn) { Regular rt; std::vector<Weighted_point_2> input_wpt; if (fn==10) { show_help(false); return;<|fim▁hole|> read_active_objects( CGAL::dispatch_or_drop_output<Point_2,Circle_2>( wpoint_grabber(std::back_inserter(input_wpt)), wpoint_grabber(std::back_inserter(input_wpt)) ) ); if (!input_wpt.size()) { print_error_message("No circle selected"); return; } int order = 0; if(fn==0 || fn==5) order = 1; if(fn==1 || fn==6) order = 2; if(fn==2 || fn==7) order = 3; if(fn==3 || fn==8) order = input_wpt.size()-1;; if(fn==4 || fn==9){ int ret_val; boost::tie(ret_val,order)=request_value_from_user<int>("Enter order"); if (ret_val < 0){ print_error_message("Incorrect value"); return; } if(order<1 || order>=(int) input_wpt.size()){ print_error_message("Not a good order"); return; } } k_delaunay<Kernel>(rt,input_wpt,order); if(fn<5)//Draw k-th regular triangulation draw_in_ipe(rt); else{//Draw kth Power diagram double incr_len=75; bbox=Iso_rectangle_2(bbox.min()+Kernel::Vector_2(-incr_len,-incr_len), bbox.max()+Kernel::Vector_2(incr_len,incr_len)); draw_dual_in_ipe(rt,bbox); //draw Voronoi Diagram } } } CGAL_IPELET(CGAL_multi_regular::MregularIpelet)<|fim▁end|>
} Iso_rectangle_2 bbox=
<|file_name|>list.py<|end_file_name|><|fim▁begin|><|fim▁hole|>"""List iSCSI Snapshots.""" # :license: MIT, see LICENSE for more details. import SoftLayer from SoftLayer.CLI import environment from SoftLayer.CLI import formatting from SoftLayer.CLI import helpers from SoftLayer import utils import click @click.command() @click.argument('iscsi-identifier') @environment.pass_env def cli(env, iscsi_identifier): """List iSCSI Snapshots.""" iscsi_mgr = SoftLayer.ISCSIManager(env.client) iscsi_id = helpers.resolve_id(iscsi_mgr.resolve_ids, iscsi_identifier, 'iSCSI') iscsi = env.client['Network_Storage_Iscsi'] snapshots = iscsi.getPartnerships( mask='volumeId,partnerVolumeId,createDate,type', id=iscsi_id) snapshots = [utils.NestedDict(n) for n in snapshots] table = formatting.Table(['id', 'createDate', 'name', 'description']) for snapshot in snapshots: table.add_row([ snapshot['partnerVolumeId'], snapshot['createDate'], snapshot['type']['name'], snapshot['type']['description'], ]) return table<|fim▁end|>
<|file_name|>selectors.cpp<|end_file_name|><|fim▁begin|>/* * selectors.cpp * * Created on: 10/07/2015 * * ========================================================================= * Copyright (C) 2015-, Daniele De Sensi ([email protected]) * * This file is part of nornir. * * nornir is free software: you can redistribute it and/or * modify it under the terms of the Lesser GNU General Public * License as published by the Free Software Foundation, either * version 3 of the License, or (at your option) any later version. * nornir is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * Lesser GNU General Public License for more details. * * You should have received a copy of the Lesser GNU General Public * License along with nornir. * If not, see <http://www.gnu.org/licenses/>. * * ========================================================================= */ #include <cfloat> #include <iostream> #include <nornir/selectors.hpp> #include <nornir/utils.hpp> #include <riff/external/cppnanomsg/nn.hpp> #include <riff/external/nanomsg/src/pair.h> #include <sstream> #include <string> #undef DEBUG #undef DEBUGB #ifdef DEBUG_SELECTORS #define DEBUG(x) \ do { \ std::cerr << "[Selectors] " << x << std::endl; \ } while (0) #define DEBUGB(x) \ do { \ x; \ } while (0) #else #define DEBUG(x) #define DEBUGB(x) #endif #define NOT_VALID DBL_MIN #define STORE_PREDICTIONS 1 using namespace mammut::cpufreq; using namespace mammut::energy; using namespace mammut::utils; using namespace mammut::topology; using namespace std; namespace nornir { Selector::Selector(const Parameters &p, const Configuration &configuration, const Smoother<MonitoredSample> *samples) : _calibrationStartMs(0), _calibrationStartTasks(0), _totalCalibrationTime(0), _calibrating(false), _ignoreViolations(false), _p(p), _configuration(configuration), _samples(samples), _numCalibrationPoints(0), _forced(false), _forcedReturned(false), _calibrationCoordination(false), _calibrationAllowed(false), _totalTasks(0), _remainingTasks(p.requirements.expectedTasksNumber) { //_joulesCounter = _localMammut.getInstanceEnergy()->getCounter(); _numPhyCores = _p.mammut.getInstanceTopology()->getPhysicalCores().size(); // TODO Fare meglio con mammut // TODO Assicurarsi che il numero totale di configurazioni possibili sia // maggiore del numero minimo di punti #if 0 // Input bandwidth smoother if(p.strategySmoothing == STRATEGY_SMOOTHING_EXPONENTIAL){ _bandwidthIn = new MovingAverageExponential<double>(p.smoothingFactor); }else{ _bandwidthIn = new MovingAverageSimple<double>(p.smoothingFactor); } #endif _bandwidthIn = new MovingAverageSimple<double>(3); } bool Selector::isFeasibleThroughput(double value, bool conservative) const { if (isPrimaryRequirement(_p.requirements.throughput)) { double conservativeOffset = 0; if (conservative && _p.conservativeValue) { conservativeOffset = _p.requirements.throughput * (_p.conservativeValue / 100.0); } if (value < _p.requirements.throughput + conservativeOffset) { return false; } } return true; } bool Selector::isFeasibleLatency(double value, bool conservative) const { return true; } bool Selector::isFeasibleUtilization(double value, bool conservative) const { if (isPrimaryRequirement(_p.requirements.minUtilization)) { double conservativeOffset = 0; if (conservative && _p.conservativeValue) { conservativeOffset = (_p.requirements.maxUtilization - _p.requirements.minUtilization) * (_p.conservativeValue / 
100.0) / 2.0; } if (value < _p.requirements.minUtilization + conservativeOffset || value > _p.requirements.maxUtilization - conservativeOffset) { return false; } } return true; } bool Selector::isFeasiblePower(double value, bool conservative) const { if (isPrimaryRequirement(_p.requirements.powerConsumption)) { double conservativeOffset = 0; if (conservative && _p.conservativeValue) { conservativeOffset = _p.requirements.powerConsumption * (_p.conservativeValue / 100.0); } return value < _p.requirements.powerConsumption - conservativeOffset; } return true; } bool Selector::isFeasibleTime(double value, bool conservative) const { if (isPrimaryRequirement(_p.requirements.executionTime)) { double conservativeOffset = 0; if (conservative && _p.conservativeValue) { conservativeOffset = _p.requirements.executionTime * (_p.conservativeValue / 100.0); } return value < _p.requirements.executionTime - conservativeOffset; } return true; } bool Selector::isFeasibleEnergy(double value, bool conservative) const { if (isPrimaryRequirement(_p.requirements.energy)) { double conservativeOffset = 0; if (conservative && _p.conservativeValue) { conservativeOffset = _p.requirements.energy * (_p.conservativeValue / 100.0); } return value < _p.requirements.energy - conservativeOffset; } return true; } bool Selector::isContractViolated() const { if (_ignoreViolations) { return false; } MonitoredSample avg = _samples->average(); double avgTime = _remainingTasks / avg.throughput; double avgEnergy = avgTime * avg.watts; return !isFeasibleThroughput(avg.throughput, false) || !isFeasibleLatency(avg.latency, false) || !isFeasibleUtilization(avg.loadPercentage, false) || !isFeasiblePower(avg.watts, false) || !isFeasibleTime(avgTime, false) || !isFeasibleEnergy(avgEnergy, false); } void Selector::startCalibration() { if (!_calibrating) { DEBUG("Starting calibration."); _calibrating = true; if (_calibrationCoordination) { while (!_calibrationAllowed) { ; } _calibrationAllowed = false; } _numCalibrationPoints = 0; _calibrationStartMs = getMillisecondsTime(); _calibrationStartTasks = _totalTasks; // Removed because there are issues with XU3 (we cannot have multiple concurrent energy counters) // We should either remove the joules calibration stats or fix it (e.g. by storing the joules every // time that getNextKnobsValues is called). /* if (_joulesCounter) { _joulesCounter->reset(); } */ } } void Selector::stopCalibration() { DEBUG("Stopping calibration."); _calibrating = false; if (_numCalibrationPoints) { CalibrationStats cs; cs.numSteps = _numCalibrationPoints; cs.duration = (getMillisecondsTime() - _calibrationStartMs); _totalCalibrationTime += cs.duration; cs.numTasks = _totalTasks - _calibrationStartTasks; // Removed because there are issues with XU3 (we cannot have multiple concurrent energy counters) // We should either remove the joules calibration stats or fix it (e.g. by storing the joules every // time that getNextKnobsValues is called). 
/* if (_joulesCounter) { cs.joules = _joulesCounter->getJoules(); } */ _calibrationStats.push_back(cs); _numCalibrationPoints = 0; } } double Selector::getTotalCalibrationTime() const { return _totalCalibrationTime; } void Selector::resetTotalCalibrationTime() { _totalCalibrationTime = 0; } void Selector::setCalibrationCoordination() { _calibrationCoordination = true; } void Selector::allowCalibration() { _calibrationAllowed = true; } void Selector::ignoreViolations() { _ignoreViolations = true; } void Selector::acceptViolations() { _ignoreViolations = false; } std::vector<CalibrationStats> Selector::getCalibrationsStats() const { return _calibrationStats; } bool Selector::isCalibrating() const { return _calibrating; } void Selector::forceConfiguration(KnobsValues &kv) { _forced = true; _forcedReturned = false; _forcedConfiguration = kv; } void Selector::updateTotalTasks(u_int64_t totalTasks) { _totalTasks = totalTasks; _remainingTasks = _p.requirements.expectedTasksNumber - totalTasks; } void Selector::updateBandwidthIn() { if (_samples->average().loadPercentage < MAX_RHO && !_samples->average().inconsistent) { if (_bandwidthIn->average() == numeric_limits<double>::max()) { _bandwidthIn->reset(); } _bandwidthIn->add(_samples->getLastSample().throughput); } else if (!_bandwidthIn->size()) { _bandwidthIn->add(numeric_limits<double>::max()); } } double Selector::initBestValue() const { if (_p.requirements.throughput == NORNIR_REQUIREMENT_MAX) { return NORNIR_REQUIREMENT_MIN; } else { return NORNIR_REQUIREMENT_MAX; } } double Selector::initBestSuboptimalValue() const { if (isPrimaryRequirement(_p.requirements.throughput)) { return NORNIR_REQUIREMENT_MIN; } if (isPrimaryRequirement(_p.requirements.latency)) { return NORNIR_REQUIREMENT_MAX; } if (isPrimaryRequirement(_p.requirements.maxUtilization)) { return NORNIR_REQUIREMENT_MAX; } if (isPrimaryRequirement(_p.requirements.powerConsumption)) { return NORNIR_REQUIREMENT_MAX; } /** * If no primary requirement (i.e. we only have a MIN/MAX * requirement) we can return anything, since we will never * check the suboptimal values (all values are always feasible). **/ return 0; } bool Selector::areKnobsValid(const KnobsValues &kv) { if (!_p.knobHyperthreadingEnabled) { return true; } else { //DEBUG("Checking validity of " << kv); double valueVC = kv[KNOB_VIRTUAL_CORES], valueHT = kv[KNOB_HYPERTHREADING]; if (!kv.areReal()) { Knob *kVC = _configuration.getKnob(KNOB_VIRTUAL_CORES); Knob *kHT = _configuration.getKnob(KNOB_HYPERTHREADING); if (!kVC->getRealFromRelative(kv[KNOB_VIRTUAL_CORES], valueVC)) { return false; } if (!kHT->getRealFromRelative(kv[KNOB_HYPERTHREADING], valueHT)) { return false; } } if (valueVC / valueHT <= _numPhyCores && (int) valueVC % (int) valueHT == 0) { //DEBUG("Valid"); return true; } else { //DEBUG("Invalid " << valueVC << " " << valueHT); return false; } } } SelectorManualCli::SelectorManualCli(const Parameters &p, const Configuration &configuration, const Smoother<MonitoredSample> *samples) : Selector(p, configuration, samples) { ; } SelectorManualCli::~SelectorManualCli() { ; } KnobsValues SelectorManualCli::getNextKnobsValues() { std::ifstream instream(getSelectorManualCliControlFile()); if (!instream.is_open()) { // If the file where the configuration should be specified // has not yet been created, remain in the same configuration. 
return _configuration.getRealValues(); } else { KnobsValues kv(KNOB_VALUE_RELATIVE); instream >> kv; instream.close(); return kv; } } SelectorManualWeb::SelectorManualWeb(const Parameters &p, const Configuration &configuration, const Smoother<MonitoredSample> *samples) : Selector(p, configuration, samples), _connected(false) { _socket = nn_socket(AF_SP, NN_PAIR); assert(_socket >= 0); } SelectorManualWeb::~SelectorManualWeb() { ; } KnobsValues SelectorManualWeb::getNextKnobsValues() { if (!_connected) { if (nn_connect(_socket, "ws://0.0.0.0:3001") >= 0) { _connected = true; } } if (_connected) { char *buf = NULL; KnobsValues kv(KNOB_VALUE_RELATIVE); bool stop = false; do { // We need to execute in a loop since the interface // may have sent multiple messages and we are interested // only in the last one (the most recent one). if (nn_recv(_socket, &buf, NN_MSG, NN_DONTWAIT) < 0) { if (errno == EAGAIN) { stop = true; } else { throw std::runtime_error("nn_recv failed."); } } else { std::stringstream ss(buf); ss >> kv; nn_freemsg(buf); } } while (!stop); if (buf) { return kv; } else { return _configuration.getRealValues(); } } else { return _configuration.getRealValues(); } } SelectorFixed::SelectorFixed(const Parameters &p, const Configuration &configuration, const Smoother<MonitoredSample> *samples) : Selector(p, configuration, samples) { ; } SelectorFixed::~SelectorFixed() { ; } KnobsValues SelectorFixed::getNextKnobsValues() { _previousConfiguration = _configuration.getRealValues(); return _configuration.getRealValues(); } SelectorPredictive::SelectorPredictive( const Parameters &p, const Configuration &configuration, const Smoother<MonitoredSample> *samples, std::unique_ptr<Predictor> throughputPredictor, std::unique_ptr<Predictor> powerPredictor) : Selector(p, configuration, samples), _throughputPredictor(std::move(throughputPredictor)), _powerPredictor(std::move(powerPredictor)), _feasible(true), _throughputPrediction(NOT_VALID), _powerPrediction(NOT_VALID) { /****************************************/ /* Predictors */ /****************************************/ _maxPerformance = -1; #if STORE_PREDICTIONS // Just to let the multi manager work even if this manager terminates // before making some predictions. 
const vector<KnobsValues> &combinations = _configuration.getAllRealCombinations(); for (size_t i = 0; i < combinations.size(); i++) { _performancePredictions[combinations.at(i)] = -1; _powerPredictions[combinations.at(i)] = -1; } #endif } SelectorPredictive::~SelectorPredictive() { ; } double SelectorPredictive::getRealThroughput(double predicted) const { if (_bandwidthIn->size()) { return min(predicted, _bandwidthIn->average()); } else { return predicted; } } double SelectorPredictive::getThroughputPrediction(const KnobsValues &values) { auto observation = _observedValues.find(_configuration.getRealValues(values)); double maxThroughput = 0.0; if (observation != _observedValues.end()) { maxThroughput = observation->second.getMaximumThroughput(); } else { _throughputPredictor->prepareForPredictions(); maxThroughput = _throughputPredictor->predict(values); }<|fim▁hole|> if (isPrimaryRequirement(_p.requirements.minUtilization)) { return maxThroughput; } else { return getRealThroughput(maxThroughput); } } double SelectorPredictive::getPowerPrediction(const KnobsValues &values) { auto observation = _observedValues.find(_configuration.getRealValues(values)); if (observation != _observedValues.end()) { return observation->second.watts; } else { _powerPredictor->prepareForPredictions(); return _powerPredictor->predict(values); } } const std::map<KnobsValues, double> & SelectorPredictive::getPrimaryPredictions() const { #if STORE_PREDICTIONS return _performancePredictions; #else throw std::runtime_error("Please define STORE_PREDICTIONS macro to 1."); #endif } const std::map<KnobsValues, double> & SelectorPredictive::getSecondaryPredictions() const { #if STORE_PREDICTIONS return _powerPredictions; #else throw std::runtime_error("Please define STORE_PREDICTIONS macro to 1."); #endif } bool SelectorPredictive::isBestMinMax(double throughput, double latency, double utilization, double power, double time, double energy, double &best) { // Throughput maximization if (_p.requirements.throughput == NORNIR_REQUIREMENT_MAX) { if (throughput > best) { best = throughput; return true; } else { return false; } } // Latency minimization if (_p.requirements.latency == NORNIR_REQUIREMENT_MIN) { throw std::runtime_error("Latency minimization not yet supported."); /* if(latency < best){ best = latency; return true; }else{ return false; } */ } // Utilization maximization if (_p.requirements.maxUtilization == NORNIR_REQUIREMENT_MAX) { if (utilization > best) { best = utilization; return true; } else { return false; } } // Power minimization if (_p.requirements.powerConsumption == NORNIR_REQUIREMENT_MIN) { if (power < best) { best = power; return true; } else { return false; } } // Energy minimization if (_p.requirements.energy == NORNIR_REQUIREMENT_MIN) { if (energy < best) { best = energy; return true; } else { return false; } } // If no MIN/MAX requirements, return true. 
return true; } bool SelectorPredictive::isBestSuboptimal(double throughput, double latency, double utilization, double power, double time, double energy, double &best) { // Throughput requirement if (isPrimaryRequirement(_p.requirements.throughput)) { if (throughput > best) { best = throughput; return true; } } // Latency requirement if (isPrimaryRequirement(_p.requirements.latency)) { throw std::runtime_error("Latency control not yet supported."); /* if(latency < best){ best = latency; return true; } */ } // Utilization requirement if (isPrimaryRequirement(_p.requirements.maxUtilization)) { // The best is the closest to minUtilization double distanceNew = _p.requirements.minUtilization - utilization; double distanceBest = _p.requirements.minUtilization - best; if (distanceNew > 0 && distanceBest < 0) { best = utilization; return true; } else if (!(distanceNew < 0 && distanceBest > 0) && abs(distanceNew) < abs(distanceBest)) { best = utilization; return true; } } // Power requirement if (isPrimaryRequirement(_p.requirements.powerConsumption)) { if (power < best) { best = power; return true; } } // Energy requirement if (isPrimaryRequirement(_p.requirements.energy)) { if (energy < best) { best = energy; return true; } } return false; } void SelectorPredictive::updateMaxPerformanceConfiguration(KnobsValues values, double performance) { if (performance > _maxPerformance) { _maxPerformance = performance; _maxPerformanceConfiguration = values; } } KnobsValues SelectorPredictive::getBestKnobsValues() { KnobsValues bestKnobs(KNOB_VALUE_REAL); KnobsValues bestSuboptimalKnobs = _configuration.getRealValues(); double bestValue = initBestValue(); double bestSuboptimalValue = initBestSuboptimalValue(); _feasible = false; #ifdef DEBUG_SELECTORS double bestThroughputPrediction = 0; // double bestLatencyPrediction = 0; double bestPowerPrediction = 0; double bestSuboptimalThroughputPrediction = 0; // double bestSuboptimalLatencyPrediction = 0; double bestSuboptimalPowerPrediction = 0; #endif bool bestKnobsSet = false; //std::cout << "Getting best." 
<< std::endl; const vector<KnobsValues> &combinations = _configuration.getAllRealCombinations(); for (const KnobsValues &currentValues : combinations) { if (!areKnobsValid(currentValues)) { continue; } double throughputPrediction = getThroughputPrediction(currentValues); double powerPrediction = getPowerPrediction(currentValues); double utilizationPrediction = _bandwidthIn->average() / throughputPrediction * 100.0; double timePrediction = _remainingTasks / throughputPrediction; double energyPrediction = timePrediction * powerPrediction; // Skip negative predictions if (throughputPrediction < 0 || powerPrediction < 0 || utilizationPrediction < 0 || timePrediction < 0 || energyPrediction < 0) { //std::cout << "Skip negative" << std::endl; continue; } updateMaxPerformanceConfiguration(currentValues, throughputPrediction); #if STORE_PREDICTIONS _performancePredictions[currentValues] = throughputPrediction; _powerPredictions[currentValues] = powerPrediction; #endif #if 1 // DEBUG("Prediction: " << std::fixed << currentValues << " " << throughputPrediction << " " // << powerPrediction << " " << energyPrediction); //std::cout << "Prediction: " << currentValues << " " << throughputPrediction << " " // << powerPrediction << " " << energyPrediction << std::endl; #endif if (isFeasibleThroughput(throughputPrediction, true) && isFeasibleLatency(0, true) && isFeasibleUtilization(utilizationPrediction, true) && isFeasiblePower(powerPrediction, true) && isFeasibleTime(timePrediction, true) && isFeasibleEnergy(energyPrediction, true)) { _feasible = true; if (isBestMinMax(throughputPrediction, 0, utilizationPrediction, powerPrediction, timePrediction, energyPrediction, bestValue) || !bestKnobsSet) { #ifdef DEBUG_SELECTORS bestThroughputPrediction = throughputPrediction; bestPowerPrediction = powerPrediction; #endif bestKnobs = currentValues; bestKnobsSet = true; } } else if (isBestSuboptimal(throughputPrediction, 0, utilizationPrediction, powerPrediction, timePrediction, energyPrediction, bestSuboptimalValue)) { // TODO: To properly decide whether a suboptimal configuration is better, we // should pick the configuration that satisfies the largest number of the // specified constraints. Only afterwards, if none of them is satisfied, // should we search for the 'closest' one. #ifdef DEBUG_SELECTORS bestSuboptimalThroughputPrediction = throughputPrediction; bestSuboptimalPowerPrediction = powerPrediction; #endif bestSuboptimalKnobs = currentValues; } } if (_feasible) { DEBUG("Best solution found: " << bestKnobs); DEBUG("Throughput prediction: " << bestThroughputPrediction); DEBUG("Power prediction: " << bestPowerPrediction); return bestKnobs; } else { DEBUG("Suboptimal solution found: " << bestSuboptimalKnobs); DEBUG("Throughput prediction: " << bestSuboptimalThroughputPrediction); DEBUG("Power prediction: " << bestSuboptimalPowerPrediction); return bestSuboptimalKnobs; } } void SelectorPredictive::refine() { _throughputPredictor->refine(); _powerPredictor->refine(); if (_p.strategySelection == STRATEGY_SELECTION_LEARNING) { _observedValues[_configuration.getRealValues()] = _samples->average(); } } void SelectorPredictive::updatePredictions(const KnobsValues &next) { const KnobsValues real = _configuration.getRealValues(next); _throughputPrediction = getThroughputPrediction(real); _powerPrediction = getPowerPrediction(real); } bool SelectorPredictive::predictorsReady() const { return _throughputPredictor->readyForPredictions() && _powerPredictor->readyForPredictions(); } bool SelectorPredictive::predictionsDone() const { return _throughputPrediction != NOT_VALID && _powerPrediction != NOT_VALID; } void SelectorPredictive::clearPredictors() { _throughputPredictor->clear(); _powerPredictor->clear(); _throughputPrediction = NOT_VALID; _powerPrediction = NOT_VALID; _observedValues.clear(); } bool SelectorPredictive::isMaxPerformanceConfiguration() const { if (_maxPerformanceConfiguration.areUndefined()) { return false; } if (!_maxPerformanceConfiguration.areReal()) { throw std::runtime_error("[FATAL ERROR] _maxPerformanceConfiguration " "must be of type REAL."); } return _configuration.getRealValues() != _maxPerformanceConfiguration; } bool SelectorLearner::isAccurate() { double predictedMaxThroughput = _throughputPrediction; double predictedPower = _powerPrediction; double maxThroughput = _samples->average().getMaximumThroughput(); double power = _samples->average().watts; if (_p.requirements.minUtilization != NORNIR_REQUIREMENT_UNDEF) { predictedMaxThroughput = _bandwidthIn->average() / (_throughputPrediction / 100.0); } double performanceError = std::abs((maxThroughput - predictedMaxThroughput) / maxThroughput * 100.0); double powerError = std::abs((power - predictedPower) / power * 100.0); DEBUG("Perf error: " << performanceError); DEBUG("Power error: " << powerError); // In multi-application scenarios we ignore power consumption model // inaccuracies. if(performanceError > _p.maxPerformancePredictionError || (!_calibrationCoordination && powerError > _p.maxPowerPredictionError) /* || _primaryPredictor->getModelError() > 10 || //TODO _secondaryPredictor->getModelError() > 10*/){ return false; } else { return true; } } KnobsValues SelectorLearner::getNextMeaningfulKnobsValues(Explorer *explorer) { while (true) { KnobsValues kv = explorer->nextRelativeKnobsValues(); if (areKnobsValid(kv)) { return kv; } } throw std::runtime_error( "No valid knobs.
This situation should never happen!"); } bool SelectorPredictive::isBestSolutionFeasible() const { return _feasible; } SelectorAnalytical::SelectorAnalytical(const Parameters &p, const Configuration &configuration, const Smoother<MonitoredSample> *samples) : SelectorPredictive(p, configuration, samples, std::unique_ptr<Predictor>(new PredictorAnalytical( PREDICTION_THROUGHPUT, p, configuration, samples)), std::unique_ptr<Predictor>(new PredictorAnalytical( PREDICTION_POWER, p, configuration, samples))), _violations(0), _firstPointGenerated(false) { ; } KnobsValues SelectorAnalytical::getNextKnobsValues() { _previousConfiguration = _configuration.getRealValues(); KnobsValues kv; if (!_firstPointGenerated) { kv = getBestKnobsValues(); } if (isContractViolated() || (isPrimaryRequirement(_p.requirements.throughput) && _samples->average().throughput < _p.requirements.throughput)) { if (++_violations > _p.tolerableSamples) { _violations = 0; kv = getBestKnobsValues(); _bandwidthIn->reset(); } else { ++_violations; kv = _configuration.getRealValues(); } } else { stopCalibration(); if (_violations) { --_violations; } kv = _configuration.getRealValues(); } if (_configuration.equal(kv)) { stopCalibration(); } else { if (!_firstPointGenerated) { _firstPointGenerated = true; } else { startCalibration(); ++_numCalibrationPoints; _bandwidthIn->reset(); } } return kv; } void SelectorAnalytical::updateBandwidthIn() { // For this selector we do not consider input bandwidth for contracts // different from the utilization one. if (isPrimaryRequirement(_p.requirements.minUtilization)) { Selector::updateBandwidthIn(); } else { ; } } SelectorAnalyticalFull::SelectorAnalyticalFull( const Parameters &p, const Configuration &configuration, const Smoother<MonitoredSample> *samples) : SelectorPredictive(p, configuration, samples, std::unique_ptr<Predictor>(new PredictorAnalytical( PREDICTION_THROUGHPUT, p, configuration, samples)), std::unique_ptr<Predictor>(new PredictorAnalyticalFull( PREDICTION_POWER, p, configuration, samples))) { ; } KnobsValues SelectorAnalyticalFull::getNextKnobsValues() { _previousConfiguration = _configuration.getRealValues(); return getBestKnobsValues(); } void SelectorAnalyticalFull::updateBandwidthIn() { // For this selector we do not consider input bandwidth for contracts // different from the utilization one. 
if (isPrimaryRequirement(_p.requirements.minUtilization)) { Selector::updateBandwidthIn(); } else { ; } } std::unique_ptr<Predictor> SelectorLearner::getPredictor(PredictorType type, const Parameters &p, const Configuration &configuration, const Smoother<MonitoredSample> *samples) const { Predictor *predictor; switch (type) { case PREDICTION_THROUGHPUT: { switch (p.strategyPredictionPerformance) { case STRATEGY_PREDICTION_PERFORMANCE_AMDAHL: { #ifdef ENABLE_MLPACK if (p.knobMappingEnabled) { predictor = new PredictorRegressionMapping<PredictorLinearRegression>( type, p, configuration, samples); } else { predictor = new PredictorLinearRegression(type, p, configuration, samples); } #else throw std::runtime_error("Please recompile with -DENABLE_MLPACK=ON to " "use the required predictor."); #endif } break; case STRATEGY_PREDICTION_PERFORMANCE_USL: case STRATEGY_PREDICTION_PERFORMANCE_USLP: { #ifdef ENABLE_GSL if (p.knobMappingEnabled) { predictor = new PredictorRegressionMapping<PredictorUsl>( type, p, configuration, samples); } else { predictor = new PredictorUsl(type, p, configuration, samples); } #else throw std::runtime_error("Please recompile with -DENABLE_GSL=ON to use " "the required predictor."); #endif } break; case STRATEGY_PREDICTION_PERFORMANCE_LEO: { #ifdef ENABLE_ARMADILLO predictor = new PredictorLeo(type, p, configuration, samples); #else throw std::runtime_error("Please recompile with -DENABLE_ARMADILLO=ON to " "use the required predictor."); #endif } break; case STRATEGY_PREDICTION_PERFORMANCE_SMT: { #ifdef ENABLE_MLPACK predictor = new PredictorSMT(type, p, configuration, samples); #else throw std::runtime_error("Please recompile with -DENABLE_MLPACK=ON to " "use the required predictor."); #endif } break; default: { throw std::runtime_error("Unknown prediction strategy."); } break; } } break; case PREDICTION_POWER: { switch (p.strategyPredictionPower) { case STRATEGY_PREDICTION_POWER_LINEAR: { #ifdef ENABLE_MLPACK if (p.knobMappingEnabled) { predictor = new PredictorRegressionMapping<PredictorLinearRegression>( type, p, configuration, samples); } else { predictor = new PredictorLinearRegression(type, p, configuration, samples); } #else throw std::runtime_error("Please recompile with -DENABLE_MLPACK=ON to " "use the required predictor."); #endif } break; case STRATEGY_PREDICTION_POWER_LEO: { #ifdef ENABLE_ARMADILLO predictor = new PredictorLeo(type, p, configuration, samples); #else throw std::runtime_error("Please recompile with -DENABLE_ARMADILLO=ON to " "use the required predictor."); #endif } break; case STRATEGY_PREDICTION_POWER_SMT: { #ifdef ENABLE_MLPACK predictor = new PredictorSMT(type, p, configuration, samples); #else throw std::runtime_error("Please recompile with -DENABLE_MLPACK=ON to " "use the required predictor."); #endif } break; default: { throw std::runtime_error("Unknown prediction strategy."); } break; } } break; } return std::unique_ptr<Predictor>(predictor); } SelectorLearner::SelectorLearner(const Parameters &p, const Configuration &configuration, const Smoother<MonitoredSample> *samples) : SelectorPredictive( p, configuration, samples, getPredictor(PREDICTION_THROUGHPUT, p, configuration, samples), getPredictor(PREDICTION_POWER, p, configuration, samples)), _explorer(NULL), _firstPointGenerated(false), _contractViolations(0), _accuracyViolations(0), _totalCalPoints(0), _updatingInterference(false) { /***************************************/ /* Explorers */ /***************************************/ vector<bool> knobsFlags; vector<KnobsValues>
additionalPoints; knobsFlags.resize(KNOB_NUM, false); if (_p.strategyPredictionPerformance == STRATEGY_PREDICTION_PERFORMANCE_USL || _p.strategyPredictionPerformance == STRATEGY_PREDICTION_PERFORMANCE_USLP) { KnobsValues kv(KNOB_VALUE_RELATIVE); // For the precise version of USL, we need to also take the throughput with // parallelism degree equal to one. if (_p.strategyPredictionPerformance == STRATEGY_PREDICTION_PERFORMANCE_USLP) { kv.reset(); kv[KNOB_VIRTUAL_CORES] = 0.0; kv[KNOB_FREQUENCY] = 0.0; kv[KNOB_CLKMOD] = 0.0; additionalPoints.push_back(kv); } kv.reset(); kv[KNOB_VIRTUAL_CORES] = 100.0; kv[KNOB_FREQUENCY] = 100; kv[KNOB_CLKMOD] = 100.0; additionalPoints.push_back(kv); if (_p.knobFrequencyEnabled || _p.knobClkModEnabled) { kv.reset(); kv[KNOB_VIRTUAL_CORES] = 100.0; kv[KNOB_FREQUENCY] = 0.0; kv[KNOB_CLKMOD] = 0.0; additionalPoints.push_back(kv); } // I only need to explore on virtual cores. knobsFlags[KNOB_VIRTUAL_CORES] = true; } else if ((_p.strategyPredictionPerformance == STRATEGY_PREDICTION_PERFORMANCE_AMDAHL || _p.strategyPredictionPower == STRATEGY_PREDICTION_POWER_LINEAR)) { // I only need to explore on virtual cores and frequency. knobsFlags[KNOB_VIRTUAL_CORES] = true; knobsFlags[KNOB_FREQUENCY] = true; } else if (_p.strategyPredictionPerformance == STRATEGY_PREDICTION_PERFORMANCE_SMT || _p.strategyPredictionPower == STRATEGY_PREDICTION_POWER_SMT) { KnobsValues kv(KNOB_VALUE_RELATIVE); kv.reset(); kv[KNOB_VIRTUAL_CORES] = 0.0; kv[KNOB_FREQUENCY] = 0.0; kv[KNOB_HYPERTHREADING] = 0.0; additionalPoints.push_back(kv); KnobsValues kvreal(KNOB_VALUE_REAL); /* if(_p.activeThreads < _numPhyCores){ kvreal[KNOB_VIRTUAL_CORES] = _p.activeThreads; }else{ kvreal[KNOB_VIRTUAL_CORES] = _numPhyCores; } */ kvreal[KNOB_VIRTUAL_CORES] = _configuration.getKnob(KNOB_VIRTUAL_CORES)->getAllowedValues().back() / _configuration.getKnob(KNOB_HYPERTHREADING)->getAllowedValues().back(); // Max number of physical cores kvreal[KNOB_FREQUENCY] = _configuration.getKnob(KNOB_FREQUENCY)->getAllowedValues().front(); kvreal[KNOB_HYPERTHREADING] = _configuration.getKnob(KNOB_HYPERTHREADING)->getAllowedValues().front(); // Convert real to relative kv.reset(); kv[KNOB_VIRTUAL_CORES] = _configuration.getKnob(KNOB_VIRTUAL_CORES)->getRelativeFromReal(kvreal[KNOB_VIRTUAL_CORES]); kv[KNOB_FREQUENCY] = _configuration.getKnob(KNOB_FREQUENCY)->getRelativeFromReal(kvreal[KNOB_FREQUENCY]); kv[KNOB_HYPERTHREADING] = _configuration.getKnob(KNOB_HYPERTHREADING)->getRelativeFromReal(kvreal[KNOB_HYPERTHREADING]); additionalPoints.push_back(kv); kv.reset(); kv[KNOB_VIRTUAL_CORES] = 0.0; kv[KNOB_FREQUENCY] = 100.0; kv[KNOB_HYPERTHREADING] = 0.0; additionalPoints.push_back(kv); kvreal.reset(); kvreal[KNOB_VIRTUAL_CORES] = _configuration.getKnob(KNOB_HYPERTHREADING)->getAllowedValues().back(); // All virtual cores in one physical core kvreal[KNOB_FREQUENCY] = _configuration.getKnob(KNOB_FREQUENCY)->getAllowedValues().front(); kvreal[KNOB_HYPERTHREADING] = _configuration.getKnob(KNOB_HYPERTHREADING)->getAllowedValues().back(); // Convert real to relative kv.reset(); kv[KNOB_VIRTUAL_CORES] = _configuration.getKnob(KNOB_VIRTUAL_CORES)->getRelativeFromReal(kvreal[KNOB_VIRTUAL_CORES]); kv[KNOB_FREQUENCY] = _configuration.getKnob(KNOB_FREQUENCY)->getRelativeFromReal(kvreal[KNOB_FREQUENCY]); kv[KNOB_HYPERTHREADING] = _configuration.getKnob(KNOB_HYPERTHREADING)->getRelativeFromReal(kvreal[KNOB_HYPERTHREADING]); additionalPoints.push_back(kv); kv.reset(); kv[KNOB_VIRTUAL_CORES] = 100.0; kv[KNOB_FREQUENCY] = 0; kv[KNOB_HYPERTHREADING] 
= 100.0; additionalPoints.push_back(kv); // I only need to explore on virtual cores, frequency and contexts knobsFlags[KNOB_VIRTUAL_CORES] = true; knobsFlags[KNOB_FREQUENCY] = true; knobsFlags[KNOB_HYPERTHREADING] = false; // The points we need for SMT predictions are those in the fixed starting points. } else { for (size_t i = 0; i < KNOB_NUM; i++) { knobsFlags[i] = _p.isKnobEnabled((KnobType) i); } } switch (_p.strategyExploration) { case STRATEGY_EXPLORATION_RANDOM: { _explorer = new ExplorerRandom(knobsFlags, additionalPoints); } break; #ifdef ENABLE_GSL case STRATEGY_EXPLORATION_HALTON: case STRATEGY_EXPLORATION_HALTON_REVERSE: case STRATEGY_EXPLORATION_NIEDERREITER: case STRATEGY_EXPLORATION_SOBOL: { _explorer = new ExplorerLowDiscrepancy(knobsFlags, _p.strategyExploration, additionalPoints); } break; #endif default: { throw std::runtime_error("Unknown exploration strategy."); } } if (_p.knobMappingEnabled && (_p.strategyPredictionPerformance == STRATEGY_PREDICTION_PERFORMANCE_AMDAHL || _p.strategyPredictionPerformance == STRATEGY_PREDICTION_PERFORMANCE_USL || _p.strategyPredictionPerformance == STRATEGY_PREDICTION_PERFORMANCE_USLP || _p.strategyPredictionPower == STRATEGY_PREDICTION_POWER_LINEAR)) { _explorer = new ExplorerMultiple(knobsFlags, _explorer, KNOB_MAPPING, MAPPING_TYPE_NUM); } } SelectorLearner::~SelectorLearner() { if (_explorer) { delete _explorer; } } KnobsValues SelectorLearner::getNextKnobsValues() { KnobsValues kv; if (_updatingInterference) { #ifdef ENABLE_GSL // It can only be done for PERF_* contract (so is primary) and on USL // predictors. (Check already done when the flag is set). PredictorUsl *pred = dynamic_cast<PredictorUsl *>(getPrimaryPredictor()); pred->updateInterference(); if (!_interferenceUpdatePoints.empty()) { kv = _interferenceUpdatePoints.back(); _interferenceUpdatePoints.pop_back(); return kv; } else { _updatingInterference = false; pred->updateCoefficients(); return _beforeInterferenceConf; } #endif } _previousConfiguration = _configuration.getRealValues(); if (_forced && !_forcedReturned) { _forcedReturned = true; return _forcedConfiguration; } bool contractViolated = isContractViolated(); bool accurate = isAccurate(); /** * The first point is generated as soon as the application starts. * Accordingly, we do not executed tasks in the original configuration * used to create the application. For this reason, we do not use * it to refine the model. * E.g. The application has been created with configuration X * and as soon as it starts we move it to configuration Y. * We do not refine with configuration X since it has never * been real executed. **/ if (!_firstPointGenerated) { _firstPointGenerated = true; startCalibration(); } else { if (isCalibrating()) { refine(); ++_totalCalPoints; } } if (isCalibrating()) { if (!predictorsReady()) { kv = getNextMeaningfulKnobsValues(_explorer); ++_numCalibrationPoints; } else { if (predictionsDone() && accurate) { kv = getBestKnobsValues(); updatePredictions(kv); DEBUG("Finished in " << _numCalibrationPoints << " steps with configuration " << kv); stopCalibration(); // In this case I do not update _numCalibrationPoints // since the next one will be (hopefully) the definitive // configuration. } else { kv = getNextMeaningfulKnobsValues(_explorer); updatePredictions(kv); ++_numCalibrationPoints; } } } else { if (contractViolated) { ++_contractViolations; } if (!accurate) { ++_accuracyViolations; } if (phaseChanged()) { /******************* Phase change. 
*******************/ _explorer->reset(); kv = getNextMeaningfulKnobsValues(_explorer); // Drop old models. clearPredictors(); startCalibration(); ++_numCalibrationPoints; resetTotalCalibrationTime(); _accuracyViolations = 0; _contractViolations = 0; _totalCalPoints = 0; _forced = false; DEBUG("Phase changed, recalibrating."); } else if (_bandwidthIn->coefficientVariation() > 100.0) { // TODO Remove magic number /******************* Bandwidth change. *******************/ refine(); ++_totalCalPoints; kv = getBestKnobsValues(); updatePredictions(kv); _accuracyViolations = 0; _contractViolations = 0; _bandwidthIn->reset(); _forced = false; DEBUG("Input bandwidth fluctuations, recomputing best solution."); } else if ((!_p.maxCalibrationTime || getTotalCalibrationTime() < _p.maxCalibrationTime) && (!_p.maxCalibrationSteps || _totalCalPoints < _p.maxCalibrationSteps) && ((!accurate && _accuracyViolations > _p.tolerableSamples) || (isBestSolutionFeasible() && !_forced && contractViolated && _contractViolations > _p.tolerableSamples))) { /******************* More calibration points. *******************/ kv = getNextMeaningfulKnobsValues(_explorer); updatePredictions(kv); refine(); ++_totalCalPoints; startCalibration(); ++_numCalibrationPoints; _accuracyViolations = 0; _contractViolations = 0; if (!accurate) { DEBUG("Inaccurate model, adding more points."); } else { DEBUG("Contract violated, adding more points."); } } else { /******************* Stable. *******************/ if (accurate && _accuracyViolations) { --_accuracyViolations; } if (!contractViolated && _contractViolations) { --_contractViolations; } kv = _configuration.getRealValues(); } } return kv; } bool SelectorLearner::phaseChanged() const { switch (_p.strategyPhaseDetection) { case STRATEGY_PHASE_DETECTION_NONE: { return false; } break; case STRATEGY_PHASE_DETECTION_TRIVIAL: { if (!_configuration.equal(_previousConfiguration)) { // We need to check that this configuration is equal to the previous one // to avoid to detect as a phase change a configuration change. return false; } // For multi applications scenario we ignore power consumption variation. return _samples->coefficientVariation().latency > 20.0 || (!_calibrationCoordination && _samples->coefficientVariation().watts > 20.0); } break; default: { return false; } break; } } void SelectorLearner::updateModelsInterference() { if ((isPrimaryRequirement(_p.requirements.powerConsumption) || isPrimaryRequirement(_p.requirements.latency)) || (_p.strategyPredictionPerformance != STRATEGY_PREDICTION_PERFORMANCE_USL && _p.strategyPredictionPerformance != STRATEGY_PREDICTION_PERFORMANCE_USLP)) { throw std::runtime_error( "updateModelForInterference is only supported for " "PERF_* contracts and for USL* performance predictors."); } _beforeInterferenceConf = _configuration.getRealValues(); _updatingInterference = true; // We only add 2 points (the third is the current one). 
KnobsValues kv(KNOB_VALUE_RELATIVE); kv[KNOB_FREQUENCY] = 0; kv[KNOB_VIRTUAL_CORES] = 0.5; _interferenceUpdatePoints.push_back(kv); kv[KNOB_VIRTUAL_CORES] = 0; _interferenceUpdatePoints.push_back(kv); } bool SelectorLearner::areModelsUpdated() const { return !_updatingInterference; } SelectorFixedExploration::SelectorFixedExploration( const Parameters &p, const Configuration &configuration, const Smoother<MonitoredSample> *samples, std::unique_ptr<Predictor> throughputPredictor, std::unique_ptr<Predictor> powerPredictor, size_t numSamples) : SelectorPredictive(p, configuration, samples, std::move(throughputPredictor), std::move(powerPredictor)) { const std::vector<KnobsValues> &combinations = _configuration.getAllRealCombinations(); size_t numConfigurations = combinations.size(); for (size_t i = 0; i < numConfigurations; i += ceil((double) numConfigurations / numSamples)) { _confToExplore.push_back(combinations.at(i)); } } SelectorFixedExploration::~SelectorFixedExploration() { ; } KnobsValues SelectorFixedExploration::getNextKnobsValues() { _previousConfiguration = _configuration.getRealValues(); if (_confToExplore.size()) { if (!isCalibrating()) { startCalibration(); } else { refine(); } KnobsValues r = _confToExplore.back(); _confToExplore.pop_back(); ++_numCalibrationPoints; return r; } else { if (isCalibrating()) { refine(); KnobsValues kv = getBestKnobsValues(); stopCalibration(); return kv; } else { return _configuration.getRealValues(); } } } #ifdef ENABLE_ARMADILLO SelectorLeo::SelectorLeo(const Parameters &p, const Configuration &configuration, const Smoother<MonitoredSample> *samples) : SelectorFixedExploration( p, configuration, samples, std::unique_ptr<Predictor>(new PredictorLeo(PREDICTION_THROUGHPUT, p, configuration, samples)), std::unique_ptr<Predictor>( new PredictorLeo(PREDICTION_POWER, p, configuration, samples)), p.leo.numSamples) { ; } SelectorLeo::~SelectorLeo() { ; } #endif SelectorFullSearch::SelectorFullSearch(const Parameters &p, const Configuration &configuration, const Smoother<MonitoredSample> *samples) : SelectorFixedExploration( p, configuration, samples, std::unique_ptr<Predictor>(new PredictorFullSearch( PREDICTION_THROUGHPUT, p, configuration, samples)), std::unique_ptr<Predictor>(new PredictorFullSearch( PREDICTION_POWER, p, configuration, samples)), configuration.getAllRealCombinations().size()) { ; } SelectorFullSearch::~SelectorFullSearch() { ; } Frequency SelectorLiMartinez::findNearestFrequency(Frequency f) const { Frequency bestDistance = _availableFrequencies.back(); Frequency bestFrequency = _availableFrequencies.back(); for (size_t i = 0; i < _availableFrequencies.size(); i++) { Frequency distance = std::abs(static_cast<long long>(_availableFrequencies.at(i) - f)); if (distance < bestDistance) { bestDistance = distance; bestFrequency = _availableFrequencies.at(i); } } return bestFrequency; } void SelectorLiMartinez::goRight() { _low1 = _low2; _high1 = _mid2 - 1; _low2 = _mid2 + 1; _mid1 = (_low1 + _high1) / 2.0; _mid2 = (_low2 + _high2) / 2.0; } void SelectorLiMartinez::goLeft() { _high1 = _mid1 - 1; _low2 = _mid1 + 1; _high2 = _high1; _mid1 = (_low1 + _high1) / 2.0; _mid2 = (_low2 + _high2) / 2.0; } bool SelectorLiMartinez::isMaxPerformanceConfiguration() const { return false; } SelectorLiMartinez::SelectorLiMartinez(const Parameters &p, const Configuration &configuration, const Smoother<MonitoredSample> *samples) : Selector(p, configuration, samples), _firstPointGenerated(false), _low1(0), _mid1(0), _high1(0), _low2(0), _mid2(0), _high2(0), 
_midId(1), _currentWatts(DBL_MAX), _optimalWatts(DBL_MAX), _optimalWorkers( configuration.getKnob(KNOB_VIRTUAL_CORES)->getAllowedValues().back()), _currentBw(0), _leftBw(0), _rightBw(0), _improved(false), _optimalFound(false) { for (auto it : _configuration.getKnob(KNOB_FREQUENCY)->getAllowedValues()) { _availableFrequencies.push_back((Frequency) it); } _optimalFrequency = _availableFrequencies.back(); _allowedCores = configuration.getKnob(KNOB_VIRTUAL_CORES)->getAllowedValues(); } SelectorLiMartinez::~SelectorLiMartinez() { ; } KnobsValues SelectorLiMartinez::getNextKnobsValues() { _previousConfiguration = _configuration.getRealValues(); KnobsValues kv(KNOB_VALUE_REAL); if (!_firstPointGenerated) { _firstPointGenerated = true; uint maxWorkers = _allowedCores .size(); //_configuration.getKnob(KNOB_TYPE_WORKERS)->getRealValue(); _low2 = 1; _mid2 = maxWorkers / 2.0; _high2 = maxWorkers; DEBUG("Max workers: " << maxWorkers << " mid2: " << _mid2); kv[KNOB_VIRTUAL_CORES] = _allowedCores[_mid2 - 1]; kv[KNOB_FREQUENCY] = _availableFrequencies.back(); _midId = 2; startCalibration(); DEBUG("Generating first point: " << kv); ++_numCalibrationPoints; } else { if (!isCalibrating()) { return _optimalKv; } else if (!isContractViolated()) { ++_numCalibrationPoints; _currentWatts = _samples->average().watts; if (_currentWatts < _optimalWatts) { _improved = true; DEBUG("Found a new optimal watts: " << _currentWatts << " vs. " << _optimalWatts); _optimalWatts = _currentWatts; _optimalFrequency = _configuration.getKnob(KNOB_FREQUENCY)->getRealValue(); _optimalWorkers = _configuration.getKnob(KNOB_VIRTUAL_CORES)->getRealValue(); _optimalFound = true; DEBUG("Optimal: " << _optimalWorkers << ", " << _optimalFrequency); } // We should keep decreasing the frequency Frequency currentFrequency = _configuration.getKnob(KNOB_FREQUENCY)->getRealValue(); kv[KNOB_VIRTUAL_CORES] = _configuration.getKnob(KNOB_VIRTUAL_CORES)->getRealValue(); Frequency nextFrequency = currentFrequency * (_p.requirements.throughput / _samples->average().throughput); nextFrequency = findNearestFrequency(nextFrequency); if (nextFrequency == currentFrequency) { --_numCalibrationPoints; goto changeworkers; } else { kv[KNOB_FREQUENCY] = nextFrequency; DEBUG("Keeping going down on frequencies. We move to: " << kv); } } else { changeworkers: ++_numCalibrationPoints; // I have to change the number of workers kv[KNOB_FREQUENCY] = _availableFrequencies.back(); if (_optimalWatts == DBL_MAX) { // Still I have not found a number of workers that satisfied // the time requirement. I increase workers. (Go right). kv[KNOB_VIRTUAL_CORES] = _allowedCores[_mid2 - 1]; goRight(); _midId = 2; DEBUG("New interval 1: [" << _low1 << "," << _mid1 << "," << _high1 << "]"); DEBUG("New interval 2: [" << _low2 << "," << _mid2 << "," << _high2 << "]"); if (_low1 > _high1 || _low2 > _high2) { kv[KNOB_VIRTUAL_CORES] = _optimalWorkers; kv[KNOB_FREQUENCY] = _optimalFrequency; _optimalKv = kv; stopCalibration(); DEBUG("Exploration finished with: " << kv); } } else if (_currentWatts > _optimalWatts || !_improved) { DEBUG("This number of workers is worst than the best we found " "up to now."); // This number of workers is not ok if (_midId == 1) { kv[KNOB_VIRTUAL_CORES] = _allowedCores[_mid2 - 1]; _midId = 2; DEBUG("Trying with the right side. We move to " << kv); } else { // Both explored and both are not ok, finished kv[KNOB_VIRTUAL_CORES] = _optimalWorkers; kv[KNOB_FREQUENCY] = _optimalFrequency; _optimalKv = kv; stopCalibration(); DEBUG("Both side are worst. 
Terminated with: " << kv); } } else { _improved = false; if (_midId == 1) { goLeft(); } else { goRight(); } DEBUG("New interval 1: [" << _low1 << "," << _mid1 << "," << _high1 << "]"); DEBUG("New interval 2: [" << _low2 << "," << _mid2 << "," << _high2 << "]"); if (_low1 <= _high1) { _midId = 1; kv[KNOB_VIRTUAL_CORES] = _allowedCores[_mid1 - 1]; DEBUG("We move to " << kv); } else if (_low2 <= _high2) { _midId = 2; kv[KNOB_VIRTUAL_CORES] = _allowedCores[_mid2 - 1]; DEBUG("We move to " << kv); } else { if (_optimalFound) { kv[KNOB_VIRTUAL_CORES] = _optimalWorkers; kv[KNOB_FREQUENCY] = _optimalFrequency; } else { // Suboptimal solution for perf contract is maximum // frequency and last visited cores. kv[KNOB_VIRTUAL_CORES] = _configuration.getKnob(KNOB_VIRTUAL_CORES)->getRealValue(); kv[KNOB_FREQUENCY] = _availableFrequencies.back(); } _optimalKv = kv; stopCalibration(); DEBUG("Exploration finished with: " << kv); } } } } return kv; } SelectorHMPNelderMead::SelectorHMPNelderMead( const Parameters &p, const Configuration &configuration, const Smoother<MonitoredSample> *samples) : Selector(p, configuration, samples), _opt(configuration.getNumHMP() * 2), _firstGenerated(false) { KnobsValues firstReal = getFirstConfiguration(); KnobsValues firstRelative(KNOB_VALUE_RELATIVE, _configuration.getNumHMP()); for (size_t i = 0; i < _configuration.getNumHMP(); i++) { KnobVirtualCores *kCores = dynamic_cast<KnobVirtualCores *>( _configuration.getKnob(i, KNOB_VIRTUAL_CORES)); KnobFrequency *kFrequency = dynamic_cast<KnobFrequency *>( _configuration.getKnob(i, KNOB_FREQUENCY)); firstRelative(i, KNOB_VIRTUAL_CORES) = kCores->getRelativeFromReal(firstReal(i, KNOB_VIRTUAL_CORES)); firstRelative(i, KNOB_FREQUENCY) = kFrequency->getRelativeFromReal(firstReal(i, KNOB_FREQUENCY)); } _opt.insert(kvToNmVector(firstRelative)); _lastRelative = firstRelative; for (size_t i = 0; i < _configuration.getNumHMP(); i++) { KnobVirtualCores *kCores = dynamic_cast<KnobVirtualCores *>( _configuration.getKnob(i, KNOB_VIRTUAL_CORES)); KnobFrequency *kFrequency = dynamic_cast<KnobFrequency *>( _configuration.getKnob(i, KNOB_FREQUENCY)); KnobsValues kv = firstRelative; double realCores = kCores->getNextRealValue( firstReal(i, KNOB_VIRTUAL_CORES), _p.nelderMeadRange); double realFrequency = kFrequency->getNextRealValue( firstReal(i, KNOB_FREQUENCY), _p.nelderMeadRange); kv(i, KNOB_VIRTUAL_CORES) = kCores->getRelativeFromReal(realCores); kv(i, KNOB_FREQUENCY) = kFrequency->getRelativeFromReal(realFrequency); DEBUG("Adding " << kv(0, KNOB_VIRTUAL_CORES) << ", " << kv(0, KNOB_FREQUENCY) << "|" << kv(1, KNOB_VIRTUAL_CORES) << ", " << kv(1, KNOB_FREQUENCY) << " to the starting simplex."); _opt.insert(kvToNmVector(kv)); kv = firstRelative; realCores = kCores->getPreviousRealValue(firstReal(i, KNOB_VIRTUAL_CORES), _p.nelderMeadRange); realFrequency = kFrequency->getPreviousRealValue( firstReal(i, KNOB_FREQUENCY), _p.nelderMeadRange); kv(i, KNOB_VIRTUAL_CORES) = kCores->getRelativeFromReal(realCores); kv(i, KNOB_FREQUENCY) = kFrequency->getRelativeFromReal(realFrequency); DEBUG("Adding " << kv(0, KNOB_VIRTUAL_CORES) << ", " << kv(0, KNOB_FREQUENCY) << "|" << kv(1, KNOB_VIRTUAL_CORES) << ", " << kv(1, KNOB_FREQUENCY) << " to the starting simplex."); _opt.insert(kvToNmVector(kv)); } } SelectorHMPNelderMead::~SelectorHMPNelderMead() { ; } // Vector[0] = VirtualCores-0, Vector[1]=Frequency-0, Vector[2]=VirtualCores-1, // Vector[3]=Frequency-1 KnobsValues SelectorHMPNelderMead::nmVectorToKv(neme::Vector v) const { KnobsValues 
kv(KNOB_VALUE_RELATIVE, _configuration.getNumHMP()); DEBUG(v[0]); DEBUG(v[1]); DEBUG(v[2]); DEBUG(v[3]); for (uint i = 0; i < _configuration.getNumHMP(); i++) { kv(i, KNOB_VIRTUAL_CORES) = v[i * 2]; kv(i, KNOB_FREQUENCY) = v[i * 2 + 1]; } return kv; } neme::Vector SelectorHMPNelderMead::kvToNmVector(KnobsValues kv) const { if (!kv.areRelative()) { throw std::runtime_error("kvToNmVector only accepts relative KnobsValues"); } neme::Vector v; v.prepare(_configuration.getNumHMP() * 2); // We currently support only cores + frequency for (uint i = 0; i < _configuration.getNumHMP(); i++) { v[i * 2] = kv(i, KNOB_VIRTUAL_CORES); v[i * 2 + 1] = kv(i, KNOB_FREQUENCY); } return v; } KnobsValues SelectorHMPNelderMead::getFirstConfiguration() const { KnobsValues kv(KNOB_VALUE_REAL, _configuration.getNumHMP()); for (size_t i = 0; i < _configuration.getNumHMP(); i++) { kv(i, KNOB_VIRTUAL_CORES) = _p.firstConfiguration.virtualCores[i]; kv(i, KNOB_FREQUENCY) = _p.firstConfiguration.frequency[i]; } return kv; } double SelectorHMPNelderMead::nmScore() const { double watts = _samples->average().watts; double thr = _samples->average().throughput; double executionTime = (_remainingTasks / thr); double energy = watts * executionTime; double score = 0; // Old scoring function (for arbitrary requirements) /* if (_p.requirements.powerConsumption != NORNIR_REQUIREMENT_UNDEF && _p.requirements.powerConsumption != NORNIR_REQUIREMENT_MIN) { score += (_p.requirements.powerConsumption - watts) / _p.requirements.powerConsumption; } if (_p.requirements.throughput != NORNIR_REQUIREMENT_UNDEF && _p.requirements.throughput != NORNIR_REQUIREMENT_MAX) { score += (thr - _p.requirements.throughput) / _p.requirements.throughput; } if (_p.requirements.energy != NORNIR_REQUIREMENT_UNDEF && _p.requirements.energy != NORNIR_REQUIREMENT_MIN) { score += (_p.requirements.energy - energy) / _p.requirements.energy; } if (_p.requirements.executionTime != NORNIR_REQUIREMENT_UNDEF && _p.requirements.executionTime != NORNIR_REQUIREMENT_MIN) { score += (_p.requirements.executionTime - executionTime) / _p.requirements.executionTime; } // Add the score distance to the MIN/MAX requirements only when all primary requiremnts are satisfied if ((_p.requirements.energy != NORNIR_REQUIREMENT_UNDEF && _p.requirements.energy != NORNIR_REQUIREMENT_MIN && energy > _p.requirements.energy) || (_p.requirements.throughput != NORNIR_REQUIREMENT_UNDEF && _p.requirements.throughput != NORNIR_REQUIREMENT_MAX && thr < _p.requirements.throughput) || (_p.requirements.powerConsumption != NORNIR_REQUIREMENT_UNDEF && _p.requirements.powerConsumption != NORNIR_REQUIREMENT_MIN && watts > _p.requirements.powerConsumption) || (_p.requirements.executionTime != NORNIR_REQUIREMENT_UNDEF && _p.requirements.executionTime != NORNIR_REQUIREMENT_MAX && executionTime > _p.requirements.executionTime)) { ; // There is a primary requirement not yet satisfied. 
}else{ if(_p.requirements.powerConsumption == NORNIR_REQUIREMENT_MIN){ score += (1000.0 - watts); }else if(_p.requirements.throughput == NORNIR_REQUIREMENT_MAX){ score += thr; }else if(_p.requirements.energy == NORNIR_REQUIREMENT_MIN){ score += (1000.0*executionTime - energy); }else if(_p.requirements.executionTime == NORNIR_REQUIREMENT_MIN){ throw std::runtime_error("Execution time req not yet available."); } } */ if (_p.requirements.powerConsumption == NORNIR_REQUIREMENT_MIN) { score = -watts; } if (_p.requirements.throughput == NORNIR_REQUIREMENT_MAX) { score = thr; } if (_p.requirements.energy == NORNIR_REQUIREMENT_MIN) { score = -energy; } if (_p.requirements.executionTime == NORNIR_REQUIREMENT_MIN) { score = -thr; } #ifdef DEBUG_SELECTORS KnobsValues kv = _configuration.getRealValues(); #endif DEBUG("Current values: " << kv(0, KNOB_VIRTUAL_CORES) << ", " << kv(0, KNOB_FREQUENCY) << "|" << kv(1, KNOB_VIRTUAL_CORES) << ", " << kv(1, KNOB_FREQUENCY) << " score " << score); return score; } static bool validVector(const neme::Vector &v) { for (int i = 0; i < v.dimension(); i++) { if (v.at(i) < 0 || v.at(i) > 100) { return false; } } return true; } KnobsValues SelectorHMPNelderMead::getNextKnobsValues() { if (!_firstGenerated) { _firstGenerated = true; return getFirstConfiguration(); } else { if (_opt.done()) { return _configuration.getRealValues(); } else { neme::Vector vIn = kvToNmVector(_lastRelative); neme::Vector vOut = _opt.step(vIn, nmScore()); while (!validVector(vOut)) { DEBUG("Skipping [" << vOut[0] << " " << vOut[1] << " " << vOut[2] << " " << vOut[3] << "]"); vOut = _opt.step(vOut, std::numeric_limits<float>::lowest()); } _lastRelative = nmVectorToKv(vOut); return _lastRelative; } } } SelectorRapl::SelectorRapl(const Parameters& p, const Configuration& configuration, const Smoother<MonitoredSample>* samples): SelectorFixed(p, configuration, samples), _pc(_p.mammut.getInstanceEnergy()->getPowerCapper(_p.powerDomain)){ PowerCap cap; cap.value = _p.requirements.powerConsumption; cap.window = 1; _pc->set(cap); } SelectorRapl::~SelectorRapl(){ ; } SelectorPforChunk::SelectorPforChunk(const Parameters& p, const Configuration& configuration, const Smoother<MonitoredSample>* samples): Selector(p, configuration, samples){ ; } SelectorPforChunk::~SelectorPforChunk(){ ; } KnobsValues SelectorPforChunk::getNextKnobsValues(){ static int i = 0; KnobsValues kv(KNOB_VALUE_RELATIVE); if(i * 25 <= 100){ kv[KNOB_PFOR_CHUNK] = i*25; i++; }else{ kv[KNOB_PFOR_CHUNK] = 100; } return kv; } } // namespace nornir<|fim▁end|>
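The Li–Martinez selector above relies on findNearestFrequency to snap a computed frequency onto the platform's discrete frequency table. A minimal Python sketch of that snapping step (the frequency values are invented for illustration):

def find_nearest_frequency(available, target):
    # Pick the allowed frequency with the smallest absolute distance
    # from the requested one; ties resolve to the first candidate.
    return min(available, key=lambda f: abs(f - target))

# Hypothetical frequency table, in kHz.
available = [1200000, 1600000, 2000000, 2400000]
assert find_nearest_frequency(available, 1700000) == 1600000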
<|file_name|>Q2.java<|end_file_name|><|fim▁begin|>package fPPPrograms; import java.util.Scanner; public class LeapYear { public static void main(String[] args) { System.out.println("Enter a year to determine whether it is a leap year or not?"); Scanner yourInput = new Scanner(System.in); int year = yourInput.nextInt(); //String y = year%400 == 0? (year%4 == 0 ) && (year%100 !=0) ? "Yes" : "Not" : "Not" ; String y = ((year%4 == 0) && (year%100 != 0) || (year%400 == 0)) ? "Yes" : "Not"; System.out.println("The Year You Entered is " + y + " a Leap Year");<|fim▁hole|><|fim▁end|>
} }
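The completed Java sample reduces to the standard Gregorian leap-year predicate; here is the same rule as a small, self-checking Python sketch:

def is_leap(year):
    # Divisible by 4, except century years, unless divisible by 400.
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)

assert is_leap(2000) and is_leap(2024)
assert not is_leap(1900) and not is_leap(2023)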
<|file_name|>readblock.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import subprocess as sp def main(args): with open(args.disk, 'rb') as f: f.seek(args.block * args.block_size) block = (f.read(args.block_size) .ljust(args.block_size, b'\xff')) # what did you expect? print("%-8s %-s" % ('off', 'data')) return sp.run(['xxd', '-g1', '-'], input=block).returncode if __name__ == "__main__": import argparse import sys parser = argparse.ArgumentParser( description="Hex dump a specific block in a disk.") parser.add_argument('disk', help="File representing the block device.") parser.add_argument('block_size', type=lambda x: int(x, 0), help="Size of a block in bytes.") parser.add_argument('block', type=lambda x: int(x, 0),<|fim▁hole|> help="Address of block to dump.") sys.exit(main(parser.parse_args()))<|fim▁end|>
<|file_name|>long_words.py<|end_file_name|><|fim▁begin|>#http://codeforces.com/problemset/problem/71/A T = int(raw_input()) while(not T == 0): word = str(raw_input()) if len(word)>10:<|fim▁hole|> else: print word T-=1<|fim▁end|>
print word[0]+str(len(word[1:len(word)-1]))+word[len(word)-1]
<|file_name|>fixtures.py<|end_file_name|><|fim▁begin|>from django.contrib.auth.models import User from esus.phorum.models import Category, Table __all__ = ("user_super", "users_usual", "table_simple") def user_super(case): case.user_super = User.objects.create( username = "superuser", password = "sha1$aaa$b27189d65f3a148a8186753f3f30774182d923d5", first_name = "Esus", last_name = "master", is_staff = True, is_superuser = True, ) <|fim▁hole|> password = "sha1$aaa$b27189d65f3a148a8186753f3f30774182d923d5", first_name = "I", last_name = "Robot", is_staff = False, is_superuser = False, ) case.user_john_doe = User.objects.create( username = "JohnDoe", password = "sha1$aaa$b27189d65f3a148a8186753f3f30774182d923d5", first_name = "John", last_name = "Doe", is_staff = False, is_superuser = False, ) case.user_staff = User.objects.create( username = "Gnome", password = "sha1$aaa$b27189d65f3a148a8186753f3f30774182d923d5", first_name = "Wiki", last_name = "Gnome", is_staff = True, is_superuser = False, ) def table_simple(case, table_owner=None): case.category = Category.objects.create( name = u"Category", slug = u"category", ) case.table = case.category.add_table( name = u"Table", owner = table_owner or case.user_tester, ) def comment_simple(case, table=None, author=None): table = table or case.table author = author or case.user_john_doe case.comment_doe = case.table.add_comment( author = author, text = u"Humble user's comment" ) case.comment_owner = case.table.add_comment( author = table.owner, text = u"Table 0wn3rz comment" )<|fim▁end|>
def users_usual(case): case.user_tester = User.objects.create( username = "Tester",
<|file_name|>pathHelpers.ts<|end_file_name|><|fim▁begin|>export const getUrlForAbsolutePath = (path: string) => { return path.split('pages').pop().replace('.md', '') } export const getJobPath = (path: string) => { return path.split('jobs').pop().replace('.md', '') } export const getFileNameOnly = (path: string) => { return path.split('/').pop().replace('.md', '') } export const deleteTimestampFromUrl = (path: string) => { return path.replace(/([0-9]{4})-([0-9]{2})-([0-9]{2})-/, '') } <|fim▁hole|> string .replace(/([a-z])([A-Z])/g, '$1-$2') .replace(/\s+/g, '-') .replace(/[?=]/g, '') .toLowerCase()<|fim▁end|>
export const kebabCase = (string = '') =>
<|file_name|>p2.js<|end_file_name|><|fim▁begin|>version https://git-lfs.github.com/spec/v1<|fim▁hole|><|fim▁end|>
oid sha256:e2a38d0984b9d8a85bbc1b3e0131e2fee2b6a6dc5f31aeb248b213f41f36038d size 575053
<|file_name|>macro-redef.rs<|end_file_name|><|fim▁begin|>#![allow( dead_code, non_snake_case,<|fim▁hole|> non_upper_case_globals )] pub const FOO: u32 = 4; pub const BAR: u32 = 5; pub const BAZ: u32 = 6;<|fim▁end|>
non_camel_case_types,
<|file_name|>client.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python import json from tdclient import api, models class Client: """API Client for Treasure Data Service """ def __init__(self, *args, **kwargs): self._api = api.API(*args, **kwargs) def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() @property def api(self): """ an instance of :class:`tdclient.api.API` """ return self._api @property def apikey(self): """ API key string. """ return self._api.apikey def server_status(self): """ Returns: a string represents current server status. """ return self.api.server_status() def create_database(self, db_name, **kwargs): """ Args: db_name (str): name of a database to create Returns: `True` if success """ return self.api.create_database(db_name, **kwargs) def delete_database(self, db_name): """ Args: db_name (str): name of database to delete Returns: `True` if success """ return self.api.delete_database(db_name) def databases(self): """ Returns: a list of :class:`tdclient.models.Database` """ databases = self.api.list_databases() return [ models.Database(self, db_name, **kwargs) for (db_name, kwargs) in databases.items() ] def database(self, db_name): """ Args: db_name (str): name of a database Returns: :class:`tdclient.models.Database` """ databases = self.api.list_databases() for (name, kwargs) in databases.items(): if name == db_name: return models.Database(self, name, **kwargs) raise api.NotFoundError("Database '%s' does not exist" % (db_name)) def create_log_table(self, db_name, table_name): """ Args: db_name (str): name of a database table_name (str): name of a table to create Returns: `True` if success """ return self.api.create_log_table(db_name, table_name) def swap_table(self, db_name, table_name1, table_name2): """ Args: db_name (str): name of a database table_name1 (str): original table name table_name2 (str): table name you want to rename to Returns: `True` if success """ return self.api.swap_table(db_name, table_name1, table_name2) def update_schema(self, db_name, table_name, schema): """Updates the schema of a table Args: db_name (str): name of a database table_name (str): name of a table schema (list): a dictionary object represents the schema definition (will be converted to JSON) e.g. .. code-block:: python [ ["member_id", # column name "string", # data type "mem_id", # alias of the column name ], ["row_index", "long", "row_ind"], ... 
] Returns: `True` if success """ return self.api.update_schema(db_name, table_name, json.dumps(schema)) def update_expire(self, db_name, table_name, expire_days): """Set expiration date to a table Args: db_name (str): name of a database table_name (str): name of a table epire_days (int): expiration date in days from today Returns: `True` if success """ return self.api.update_expire(db_name, table_name, expire_days) def delete_table(self, db_name, table_name): """Delete a table Args: db_name (str): name of a database table_name (str): name of a table Returns: a string represents the type of deleted table """ return self.api.delete_table(db_name, table_name) def tables(self, db_name): """List existing tables Args: db_name (str): name of a database Returns: a list of :class:`tdclient.models.Table` """ m = self.api.list_tables(db_name) return [ models.Table(self, db_name, table_name, **kwargs) for (table_name, kwargs) in m.items() ] def table(self, db_name, table_name): """ Args: db_name (str): name of a database table_name (str): name of a table Returns: :class:`tdclient.models.Table` Raises: tdclient.api.NotFoundError: if the table doesn't exist """ tables = self.tables(db_name) for table in tables: if table.table_name == table_name: return table raise api.NotFoundError("Table '%s.%s' does not exist" % (db_name, table_name)) def tail(self, db_name, table_name, count, to=None, _from=None, block=None): """Get the contents of the table in reverse order based on the registered time (last data first). Args: db_name (str): Target database name. table_name (str): Target table name. count (int): Number for record to show up from the end. to: Deprecated parameter. _from: Deprecated parameter. block: Deprecated parameter. Returns: [dict]: Contents of the table. """ return self.api.tail(db_name, table_name, count, to, _from, block) def change_database(self, db_name, table_name, new_db_name): """Move a target table from it's original database to new destination database. Args: db_name (str): Target database name. table_name (str): Target table name. new_db_name (str): Destination database name to be moved. Returns: bool: `True` if succeeded. """ return self.api.change_database(db_name, table_name, new_db_name) def query( self, db_name, q, result_url=None, priority=None, retry_limit=None, type="hive", **kwargs ): """Run a query on specified database table. Args: db_name (str): name of a database q (str): a query string result_url (str): result output URL. e.g., ``postgresql://<username>:<password>@<hostname>:<port>/<database>/<table>`` priority (int or str): priority (e.g. "NORMAL", "HIGH", etc.) retry_limit (int): retry limit type (str): name of a query engine Returns: :class:`tdclient.models.Job` Raises: ValueError: if unknown query type has been specified """ # for compatibility, assume type is hive unless specifically specified if type not in ["hive", "pig", "impala", "presto"]: raise ValueError("The specified query type is not supported: %s" % (type)) job_id = self.api.query( q, type=type, db=db_name, result_url=result_url, priority=priority, retry_limit=retry_limit, **kwargs ) return models.Job(self, job_id, type, q) def jobs(self, _from=None, to=None, status=None, conditions=None): """List jobs Args: _from (int, optional): Gets the Job from the nth index in the list. Default: 0. to (int, optional): Gets the Job up to the nth index in the list. By default, the first 20 jobs in the list are displayed status (str, optional): Filter by given status. 
{"queued", "running", "success", "error"} conditions (str, optional): Condition for ``TIMESTAMPDIFF()`` to search for slow queries. Avoid using this parameter as it can be dangerous. Returns: a list of :class:`tdclient.models.Job` """ results = self.api.list_jobs(_from, to, status, conditions) return [job_from_dict(self, d) for d in results] def job(self, job_id): """Get a job from `job_id` Args: job_id (str): job id Returns: :class:`tdclient.models.Job` """ d = self.api.show_job(str(job_id)) return job_from_dict(self, d, job_id=job_id) def job_status(self, job_id): """ Args: job_id (str): job id Returns: a string represents the status of the job ("success", "error", "killed", "queued", "running") """ return self.api.job_status(job_id) def job_result(self, job_id): """ Args: job_id (str): job id Returns: a list of each rows in result set """ return self.api.job_result(job_id) def job_result_each(self, job_id): """ Args: job_id (str): job id Returns: an iterator of result set """ for row in self.api.job_result_each(job_id): yield row def job_result_format(self, job_id, format): """ Args: job_id (str): job id format (str): output format of result set Returns: a list of each rows in result set """ return self.api.job_result_format(job_id, format) def job_result_format_each(self, job_id, format): """ Args: job_id (str): job id format (str): output format of result set Returns: an iterator of rows in result set """ for row in self.api.job_result_format_each(job_id, format): yield row def kill(self, job_id): """ Args: job_id (str): job id Returns: a string represents the status of killed job ("queued", "running") """ return self.api.kill(job_id) def export_data(self, db_name, table_name, storage_type, params=None): """Export data from Treasure Data Service Args: db_name (str): name of a database table_name (str): name of a table storage_type (str): type of the storage params (dict): optional parameters. Assuming the following keys: - access_key_id (str): ID to access the information to be exported. - secret_access_key (str): Password for the `access_key_id`. - file_prefix (str, optional): Filename of exported file. Default: "<database_name>/<table_name>" - file_format (str, optional): File format of the information to be exported. {"jsonl.gz", "tsv.gz", "json.gz"} - from (int, optional): From Time of the data to be exported in Unix epoch format. - to (int, optional): End Time of the data to be exported in Unix epoch format. - assume_role (str, optional): Assume role. - bucket (str): Name of bucket to be used. - domain_key (str, optional): Job domain key. - pool_name (str, optional): For Presto only. Pool name to be used, if not specified, default pool would be used. Returns: :class:`tdclient.models.Job` """ params = {} if params is None else params job_id = self.api.export_data(db_name, table_name, storage_type, params) return models.Job(self, job_id, "export", None) def partial_delete(self, db_name, table_name, to, _from, params=None): """Create a job to partially delete the contents of the table with the given time range. Args: db_name (str): Target database name. table_name (str): Target table name. to (int): Time in Unix Epoch format indicating the End date and time of the data to be deleted. Should be set only by the hour. Minutes and seconds values will not be accepted. _from (int): Time in Unix Epoch format indicating the Start date and time of the data to be deleted. Should be set only by the hour. Minutes and seconds values will not be accepted. 
params (dict, optional): Extra parameters. - pool_name (str, optional): Indicates the resource pool to execute this job. If not provided, the account's default resource pool would be used. - domain_key (str, optional): Domain key that will be assigned to the partial delete job to be created Returns: :class:`tdclient.models.Job` """ params = {} if params is None else params job_id = self.api.partial_delete(db_name, table_name, to, _from, params) return models.Job(self, job_id, "partialdelete", None) def create_bulk_import(self, name, database, table, params=None): """Create new bulk import session Args: name (str): name of new bulk import session database (str): name of a database table (str): name of a table Returns: :class:`tdclient.models.BulkImport` """ params = {} if params is None else params self.api.create_bulk_import(name, database, table, params) return models.BulkImport(self, name=name, database=database, table=table) def delete_bulk_import(self, name): """Delete a bulk import session Args: name (str): name of a bulk import session Returns: `True` if success """ return self.api.delete_bulk_import(name) def freeze_bulk_import(self, name): """Freeze a bulk import session Args: name (str): name of a bulk import session Returns: `True` if success """ return self.api.freeze_bulk_import(name) def unfreeze_bulk_import(self, name): """Unfreeze a bulk import session Args: name (str): name of a bulk import session Returns: `True` if success """ return self.api.unfreeze_bulk_import(name) def perform_bulk_import(self, name): """Perform a bulk import session Args: name (str): name of a bulk import session Returns: :class:`tdclient.models.Job` """ job_id = self.api.perform_bulk_import(name) return models.Job(self, job_id, "bulk_import", None) def commit_bulk_import(self, name): """Commit a bulk import session Args: name (str): name of a bulk import session Returns: `True` if success """ return self.api.commit_bulk_import(name) def bulk_import_error_records(self, name): """ Args: name (str): name of a bulk import session Returns: an iterator of error records """ for record in self.api.bulk_import_error_records(name): yield record def bulk_import(self, name): """Get a bulk import session Args: name (str): name of a bulk import session Returns: :class:`tdclient.models.BulkImport` """ data = self.api.show_bulk_import(name) return models.BulkImport(self, **data) def bulk_imports(self): """List bulk import sessions Returns: a list of :class:`tdclient.models.BulkImport` """ return [ models.BulkImport(self, **data) for data in self.api.list_bulk_imports() ] def bulk_import_upload_part(self, name, part_name, bytes_or_stream, size): """Upload a part to a bulk import session Args: name (str): name of a bulk import session part_name (str): name of a part of the bulk import session bytes_or_stream (file-like): a file-like object contains the part size (int): the size of the part """ return self.api.bulk_import_upload_part(name, part_name, bytes_or_stream, size) def bulk_import_upload_file(self, name, part_name, format, file, **kwargs): """Upload a part to Bulk Import session, from an existing file on filesystem. Args: name (str): name of a bulk import session part_name (str): name of a part of the bulk import session format (str): format of data type (e.g. "msgpack", "json", "csv", "tsv") file (str or file-like): the name of a file, or a file-like object, containing the data **kwargs: extra arguments. There is more documentation on `format`, `file` and `**kwargs` at `file import parameters`_. 
In particular, for "csv" and "tsv" data, you can change how data columns are parsed using the ``dtypes`` and ``converters`` arguments. * ``dtypes`` is a dictionary used to specify a datatype for individual columns, for instance ``{"col1": "int"}``. The available datatypes are ``"bool"``, ``"float"``, ``"int"``, ``"str"`` and ``"guess"``. If a column is also mentioned in ``converters``, then the function will be used, NOT the datatype. * ``converters`` is a dictionary used to specify a function that will be used to parse individual columns, for instance ``{"col1", int}``. The default behaviour is ``"guess"``, which makes a best-effort to decide the column datatype. See `file import parameters`_ for more details. .. _`file import parameters`: https://tdclient.readthedocs.io/en/latest/file_import_parameters.html """ return self.api.bulk_import_upload_file(name, part_name, format, file, **kwargs) def bulk_import_delete_part(self, name, part_name): """Delete a part from a bulk import session Args: name (str): name of a bulk import session part_name (str): name of a part of the bulk import session Returns: `True` if success """ return self.api.bulk_import_delete_part(name, part_name) def list_bulk_import_parts(self, name): """List parts of a bulk import session Args: name (str): name of a bulk import session Returns: a list of string represents the name of parts """ return self.api.list_bulk_import_parts(name) def create_schedule(self, name, params=None): """Create a new scheduled query with the specified name. Args: name (str): Scheduled query name. params (dict, optional): Extra parameters. - type (str): Query type. {"presto", "hive"}. Default: "hive" - database (str): Target database name. - timezone (str): Scheduled query's timezone. e.g. "UTC" For details, see also: https://gist.github.com/frsyuki/4533752 - cron (str, optional): Schedule of the query. {``"@daily"``, ``"@hourly"``, ``"10 * * * *"`` (custom cron)} See also: https://tddocs.atlassian.net/wiki/spaces/PD/pages/1084633/Scheduling+Jobs+Using+TD+Console - delay (int, optional): A delay ensures all buffered events are imported before running the query. Default: 0 - query (str): Is a language used to retrieve, insert, update and modify data. See also: https://tddocs.atlassian.net/wiki/spaces/PD/pages/1084438/SQL+Examples+of+Scheduled+Queries - priority (int, optional): Priority of the query. Range is from -2 (very low) to 2 (very high). Default: 0 - retry_limit (int, optional): Automatic retry count. Default: 0 - engine_version (str, optional): Engine version to be used. If none is specified, the account's default engine version would be set. {"stable", "experimental"} - pool_name (str, optional): For Presto only. Pool name to be used, if not specified, default pool would be used. - result (str, optional): Location where to store the result of the query. e.g. 'tableau://user:[email protected]:1234/datasource' Returns: :class:`datetime.datetime`: Start date time. """ if "cron" not in params: raise ValueError("'cron' option is required") if "query" not in params: raise ValueError("'query' option is required") params = {} if params is None else params return self.api.create_schedule(name, params) def delete_schedule(self, name): """Delete the scheduled query with the specified name. Args: name (str): Target scheduled query name. Returns: (str, str): Tuple of cron and query. """ return self.api.delete_schedule(name) def schedules(self): """Get the list of all the scheduled queries. 
Returns: [:class:`tdclient.models.Schedule`] """ result = self.api.list_schedules() return [models.Schedule(self, **m) for m in result] def update_schedule(self, name, params=None): """Update the scheduled query. Args: name (str): Target scheduled query name. params (dict): Extra parameters. - type (str): Query type. {"presto", "hive"}. Default: "hive" - database (str): Target database name. - timezone (str): Scheduled query's timezone. e.g. "UTC" For details, see also: https://gist.github.com/frsyuki/4533752 - cron (str, optional): Schedule of the query. {``"@daily"``, ``"@hourly"``, ``"10 * * * *"`` (custom cron)} See also: https://tddocs.atlassian.net/wiki/spaces/PD/pages/1084633/Scheduling+Jobs+Using+TD+Console - delay (int, optional): A delay ensures all buffered events are imported before running the query. Default: 0 - query (str): Is a language used to retrieve, insert, update and modify data. See also: https://tddocs.atlassian.net/wiki/spaces/PD/pages/1084438/SQL+Examples+of+Scheduled+Queries - priority (int, optional): Priority of the query. Range is from -2 (very low) to 2 (very high). Default: 0 - retry_limit (int, optional): Automatic retry count. Default: 0 - engine_version (str, optional): Engine version to be used. If none is specified, the account's default engine version would be set. {"stable", "experimental"} - pool_name (str, optional): For Presto only. Pool name to be used, if not specified, default pool would be used. - result (str, optional): Location where to store the result of the query. e.g. 'tableau://user:[email protected]:1234/datasource' """ params = {} if params is None else params self.api.update_schedule(name, params) def history(self, name, _from=None, to=None): """Get the history details of the saved query for the past 90days. Args: name (str): Target name of the scheduled query. _from (int, optional): Indicates from which nth record in the run history would be fetched. Default: 0. Note: Count starts from zero. This means that the first record in the list has a count of zero. to (int, optional): Indicates up to which nth record in the run history would be fetched. Default: 20 Returns: [:class:`tdclient.models.ScheduledJob`] """ result = self.api.history(name, _from, to) def scheduled_job(m): ( scheduled_at, job_id, type, status, query, start_at, end_at, result_url, priority, database, ) = m job_param = { "url": None, "debug": None, "start_at": start_at, "end_at": end_at, "cpu_time": None, "result_size": None, "result": None, "result_url": result_url, "hive_result_schema": None, "priority": priority, "retry_limit": None, "org_name": None, "database": database, } return models.ScheduledJob( self, scheduled_at, job_id, type, query, **job_param ) return [scheduled_job(m) for m in result] def run_schedule(self, name, time, num): """Execute the specified query. Args: name (str): Target scheduled query name. time (int): Time in Unix epoch format that would be set as TD_SCHEDULED_TIME num (int): Indicates how many times the query will be executed. Value should be 9 or less. 
Returns: [:class:`tdclient.models.ScheduledJob`] """ results = self.api.run_schedule(name, time, num) def scheduled_job(m): job_id, type, scheduled_at = m return models.ScheduledJob(self, scheduled_at, job_id, type, None) return [scheduled_job(m) for m in results] def import_data( self, db_name, table_name, format, bytes_or_stream, size, unique_id=None ): """Import data into Treasure Data Service Args: db_name (str): name of a database table_name (str): name of a table format (str): format of data type (e.g. "msgpack.gz") bytes_or_stream (str or file-like): a byte string or a file-like object contains the data size (int): the length of the data unique_id (str): a unique identifier of the data Returns: second in float represents elapsed time to import data """ return self.api.import_data( db_name, table_name, format, bytes_or_stream, size, unique_id=unique_id ) def import_file(self, db_name, table_name, format, file, unique_id=None): """Import data into Treasure Data Service, from an existing file on filesystem. This method will decompress/deserialize records from given file, and then convert it into format acceptable from Treasure Data Service ("msgpack.gz"). Args: db_name (str): name of a database table_name (str): name of a table format (str): format of data type (e.g. "msgpack", "json") file (str or file-like): a name of a file, or a file-like object contains the data unique_id (str): a unique identifier of the data Returns: float represents the elapsed time to import data """ return self.api.import_file( db_name, table_name, format, file, unique_id=unique_id ) def results(self): """Get the list of all the available authentications. Returns: a list of :class:`tdclient.models.Result` """ results = self.api.list_result() def result(m): name, url, organizations = m return models.Result(self, name, url, organizations) return [result(m) for m in results] def create_result(self, name, url, params=None): """Create a new authentication with the specified name. Args: name (str): Authentication name. url (str): Url of the authentication to be created. e.g. "ftp://test.com/" params (dict, optional): Extra parameters. Returns: bool: True if succeeded. """ params = {} if params is None else params return self.api.create_result(name, url, params) def delete_result(self, name): """Delete the authentication having the specified name. Args: name (str): Authentication name. Returns: bool: True if succeeded. 
""" return self.api.delete_result(name) def users(self): """List users Returns: a list of :class:`tdclient.models.User` """ results = self.api.list_users() def user(m): name, org, roles, email = m return models.User(self, name, org, roles, email) return [user(m) for m in results] def add_user(self, name, org, email, password): """Add a new user Args: name (str): name of the user org (str): organization email: (str): e-mail address password (str): password Returns: `True` if success """ return self.api.add_user(name, org, email, password) def remove_user(self, name): """Remove a user Args: name (str): name of the user Returns: `True` if success """ return self.api.remove_user(name) def list_apikeys(self, name): """ Args: name (str): name of the user Returns: a list of string of API key """ return self.api.list_apikeys(name) def add_apikey(self, name): """ Args: name (str): name of the user Returns: `True` if success """ return self.api.add_apikey(name) def remove_apikey(self, name, apikey): """ Args: name (str): name of the user apikey (str): an API key to remove Returns: `True` if success """ return self.api.remove_apikey(name, apikey) def close(self): """Close opened API connections. """ return self._api.close() def job_from_dict(client, dd, **values): d = dict() d.update(dd) d.update(values) return models.Job( client, d["job_id"], d["type"], d["query"], status=d.get("status"), url=d.get("url"), debug=d.get("debug"), start_at=d.get("start_at"), end_at=d.get("end_at"), created_at=d.get("created_at"), updated_at=d.get("updated_at"), cpu_time=d.get("cpu_time"), result_size=d.get("result_size"), result=d.get("result"), result_url=d.get("result_url"), hive_result_schema=d.get("hive_result_schema"), priority=d.get("priority"), retry_limit=d.get("retry_limit"), org_name=d.get("org_name"),<|fim▁hole|> user_name=d.get("user_name"), linked_result_export_job_id=d.get("linked_result_export_job_id"), result_export_target_job_id=d.get("result_export_target_job_id"), )<|fim▁end|>
database=d.get("database"), num_records=d.get("num_records"),
<|file_name|>routes.js<|end_file_name|><|fim▁begin|>var _ = require('underscore') , path = require('path') , passport = require('passport') , AuthCtrl = require('./controllers/auth') , UserCtrl = require('./controllers/user') , User = require('./models/User.js') , userRoles = require('../client/scripts/routingConfig').userRoles , accessLevels = require('../client/scripts/routingConfig').accessLevels; var routes = [ // Views { path: '/partials/*', httpMethod: 'GET', middleware: [function (req, res) { var requestedView = path.join('./', req.url); res.render(requestedView); }] }, // Auth { path: '/api/v1/register', httpMethod: 'POST', middleware: [AuthCtrl.register] }, { path: '/api/v1/login', httpMethod: 'POST', middleware: [AuthCtrl.login] }, { path: '/api/v1/logout', httpMethod: 'POST', middleware: [AuthCtrl.logout] }, // User resource { path: '/api/v1/users', httpMethod: 'GET', middleware: [UserCtrl.index], accessLevel: accessLevels.admin_level }, // All other get requests should be handled by AngularJS's client-side routing system { path: '/*', httpMethod: 'GET', middleware: [function(req, res) { var role = userRoles.guest_role , username = ''; if(req.user) { role = req.user.role; username = req.user.username; } res.cookie('user', JSON.stringify({ 'username': username, 'role': role })); res.render('index'); }] } ]; module.exports = function(app) { _.each(routes, function(route) { route.middleware.unshift(ensureAuthorized); var args = _.flatten([route.path, route.middleware]); switch(route.httpMethod.toUpperCase()) { case 'GET': app.get.apply(app, args); break; case 'POST': app.post.apply(app, args); break; case 'PUT': app.put.apply(app, args); break; case 'DELETE': app.delete.apply(app, args); break; default: throw new Error('Invalid HTTP method specified for route ' + route.path); break; }<|fim▁hole|> function ensureAuthorized(req, res, next) { var role; if(!req.user) { role = userRoles.guest_role; } else { role = req.user.role; } var accessLevel = _.findWhere(routes, { path: req.route.path, httpMethod: req.route.stack[0].method.toUpperCase() }).accessLevel || accessLevels.public_level; if(!(accessLevel === "*") && !(accessLevel.indexOf(role) !== -1)) { return res.send(403); } return next(); }<|fim▁end|>
}); }
<|file_name|>uploader.rs<|end_file_name|><|fim▁begin|>// External Dependencies ------------------------------------------------------ use diesel; use diesel::prelude::*; // Internal Dependencies ------------------------------------------------------ use super::super::Server; use ::db::models::User; use ::db::schema::users::dsl::{server_id, nickname as user_nickname, is_uploader}; use ::db::schema::users::table as userTable; // Server Uploader Interface -------------------------------------------------- impl Server { pub fn list_uploaders(&self) -> Vec<User> { userTable.filter( server_id.eq(&self.config.table_id) ).filter(is_uploader.eq(true)) .order(user_nickname) .load::<User>(&self.config.connection) .unwrap_or_else(|_| vec![]) } pub fn add_uploader(&mut self, nickname: &str) -> bool { ::db::create_user_if_not_exists(&self.config, nickname).ok(); self.update_upload_user(nickname, true) } pub fn remove_uploader(&mut self, nickname: &str) -> bool { self.update_upload_user(nickname, false) } fn update_upload_user(&self, nickname: &str, set_uploader: bool) -> bool { if ::db::user_exists(&self.config, nickname) { diesel::update( userTable.filter( server_id.eq(&self.config.table_id) ).filter( user_nickname.eq(nickname) ) ).set(is_uploader.eq(set_uploader)).execute( &self.config.connection ).ok();<|fim▁hole|> } else { false } } }<|fim▁end|>
true
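The Rust sample above flips an is_uploader flag behind a user-exists check via Diesel. A rough Python/sqlite3 sketch of the same pattern, with an illustrative schema rather than the original Diesel one:

# Ensure the row exists, flip is_uploader, and report whether a
# matching user was found (mirrors the bool the Rust method returns).
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE users (server_id INTEGER, nickname TEXT, is_uploader INTEGER DEFAULT 0)")

def update_upload_user(server_id, nickname, set_uploader):
    cur = con.execute(
        "UPDATE users SET is_uploader = ? WHERE server_id = ? AND nickname = ?",
        (int(set_uploader), server_id, nickname),
    )
    return cur.rowcount > 0

con.execute("INSERT INTO users (server_id, nickname) VALUES (1, 'alice')")
print(update_upload_user(1, "alice", True))   # True
print(update_upload_user(1, "bob", True))     # False: user does not exist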
<|file_name|>deriving-via-extension-hash-struct.rs<|end_file_name|><|fim▁begin|>// ignore-fast <|fim▁hole|>// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #[deriving(Hash)] struct Foo { x: int, y: int, z: int } pub fn main() {}<|fim▁end|>
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. //
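The #[deriving(Hash)] attribute in the sample above derives a structural hash for Foo. A loose Python analog, in which a frozen dataclass derives __hash__ from its fields:

# Frozen dataclasses compare and hash structurally, so instances can
# be used as dict/set keys, much like a derived Hash impl.
from dataclasses import dataclass

@dataclass(frozen=True)
class Foo:
    x: int
    y: int
    z: int

print(hash(Foo(1, 2, 3)) == hash(Foo(1, 2, 3)))  # True
print(len({Foo(1, 2, 3), Foo(1, 2, 3)}))         # 1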
<|file_name|>pyku-web.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# pyku
# ====
# Python-based random haiku generator
#
# Chris Collins, <[email protected]>
#
# v0.5 - 2013-11-15
#
# Copyright (C) 2013 Chris Collins
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import os

debug = False
debug_word = ""

LOCAL_PATH = os.path.dirname(os.path.realpath(__file__))

### TODO:###
# Unhandled Exceptions:
# "ai" is two syllables
# "ia" may be two syllables, eg. 'negotiated'
# "oa" may be two syllables, eg. 'Croation'
# "-ed" is usually silent, unless following double t"s
# "-ier" is usually two syllables
# Some -le not being handled right:
#   maybe any le following a vowel is 1 syllable
#   and follwing a consonant is 2?
#   ex. "tinkle" being shown as 1 syllable
# French-based words SUCK: "serviette" shown as 5 syllables
# "-es" still not quite right: debauches shown as 2 syllables
# What about "-iest"? 2 syllables: Roomiest being shown as 2 instead of 3
# Sonofa: Cheeseburger = 4 syllables.  Tripping over the middle "e"

### Import the necessary modules ###
# Import web for web.py support
# Import RE for regular expression matching
# Import random to grab random words

import web
import re
if not debug_word:
    import random

### Set global variables ###

# The starting number of syllables
syllables = 0

# Our Random Word
glorand_word = ""

urls = (
    "/", "index"
)


class index:
    def GET(self):
        return render.index("pyku")


def pyku():
    """ Build a Haiku """
    tonic = buildline(5)
    penultimate = buildline(7)
    ultimate = buildline(5)
    # return all three lines of the haiku
    return "%s\n%s\n%s" % (tonic, penultimate, ultimate)


def buildline(line_syllables):
    """ Build each line from random words """
    line_list = []
    our_syllables = 0
    while (our_syllables < line_syllables):
        randomword()
        if debug:
            print glorand_word
        if (our_syllables + syllables) > line_syllables:
            # this word would overshoot the line; loop and pick another
            continue
        else:
            #print glorand_word
            #print syllables
            line_list.append(glorand_word)
            our_syllables += syllables
            if debug:
                print "My Line Syllables:", line_syllables
                print "My Syllables So Far:", our_syllables
    return ' '.join(line_list)


def randomword():
    """ Gets a random word from the Ubuntu american-english dictionary """
    # Reset the syllable count
    syleq()
    if debug_word:
        random_word = debug_word
    else:
        # Open our word list
        text = open(os.path.join(LOCAL_PATH, "american-english"))
        words = text.read()
        random_word = random.choice(words.split())
    if debug:
        print random_word
    check_possessive(random_word)


def check_possessive(random_word):
    """ For now, we want to throw back possessive words.
""" poss = re.match(r".*'s", random_word, re.IGNORECASE) if poss: randomword() else: if debug: print "Our word is:", random_word global glorand_word glorand_word = random_word vowelfind(random_word) def vowelfind(random_word): """ Find the vowel clusters in the random word """ vowel_list = "[aeiouy]+" vowels = re.findall(vowel_list, random_word, re.IGNORECASE) if vowels: vowelcount = len(vowels) if debug: print vowels global syllables syllables += vowelcount vowelcontext(random_word) else: randomword() def vowelcontext(random_word): """ Container module for running through the list of checks we need to do to count syllables. """ if debug: print "Going into 'vowelcontext':" print "Number of Syllables, maybe: ", syllables trailing_e(random_word) # Obsoleted by adding 'y' to vowel list # trailing_y(random_word) def trailing_e(random_word): """ First: Check if word ends in '-e', or optionally, '-es', not immediately preceeded by another vowel OR ending in '-que' AND does not end in '-ble' or '-ses', THEN decrements the syllable count. UNLESS - there is only 1 syllable. Cases: fare, faires, tree - matches first, does not decrement martinique - does not match first, does match second, decrements unibroue - does not match first or second, does not decrement # TODO - Unhandled Exceptions: fire - could be two syllables """ # Finds trailing -e(s) WITHOUT preceeding vowels OR ending in '-que' #trail_e = re.findall(r"[^aeiou]+?e[s]?$", random_word, re.IGNORECASE) trail_e_que = re.findall(r"((qu)|([^aeiou]))+?e[s]?$", random_word, re.IGNORECASE) # Check for '-ble or -ses' trail_ses_ble = re.findall(r"((bl)|(s))e[s]?$", random_word, re.IGNORECASE) if trail_e_que and not trail_ses_ble: if debug: print trail_e_que print """ Trailing '-e(s)' or '-que' characters and no trailing '-ble' or '-ses'.""" syldec(1) if debug: print "Leaving 'trailing_e':" print "Number of Syllables, maybe: ", syllables modcount("trailing_e") def sylinc(i): global syllables syllables += i def syldec(i): global syllables if syllables > 1: # Can't reduce to 0 syllables -= i def syleq(): global syllables syllables = 0 <|fim▁hole|> if debug: print "Leaving '" + mod + "' - " print "Number of Syllables is, maybe: ", syllables app = web.application(urls, globals()) render = web.template.render(LOCAL_PATH + "/templates/", globals={"buildline": buildline}) curdir = os.path.dirname(__file__) session = web.session.Session( app, web.session.DiskStore( os.path.join(curdir, "%s/sessions" % LOCAL_PATH)),) application = app.wsgifunc()<|fim▁end|>
def modcount(mod):
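The pyku sample counts syllables as vowel clusters, then corrects for silent trailing endings in trailing_e(). A standalone Python 3 sketch of that heuristic, folded into one function; it reproduces the sample's behavior, including the misses noted in its TODO list:

# Count [aeiouy]+ clusters, then subtract one for a silent trailing
# -e/-es/-que unless the word ends in -ble/-ses or only has one
# syllable (the same rules the sample spreads across vowelfind()
# and trailing_e()).
import re

def estimate_syllables(word):
    clusters = re.findall(r"[aeiouy]+", word, re.IGNORECASE)
    count = len(clusters)
    trailing_e = re.search(r"((qu)|([^aeiou]))e[s]?$", word, re.IGNORECASE)
    keeps_final_e = re.search(r"((bl)|(s))e[s]?$", word, re.IGNORECASE)
    if trailing_e and not keeps_final_e and count > 1:
        count -= 1
    return count

for word in ("tree", "martinique", "debauches", "haiku"):
    print(word, estimate_syllables(word))
# tree 1, martinique 3, debauches 2 (the known miss), haiku 2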
<|file_name|>CQPlotSubwidget.cpp<|end_file_name|><|fim▁begin|>// Copyright (C) 2019 - 2021 by Pedro Mendes, Rector and Visitors of the // University of Virginia, University of Heidelberg, and University // of Connecticut School of Medicine. // All rights reserved. // Copyright (C) 2017 - 2018 by Pedro Mendes, Virginia Tech Intellectual // Properties, Inc., University of Heidelberg, and University of // of Connecticut School of Medicine. // All rights reserved. // Copyright (C) 2012 - 2016 by Pedro Mendes, Virginia Tech Intellectual // Properties, Inc., University of Heidelberg, and The University // of Manchester. // All rights reserved. #include "CQPlotSubwidget.h" #include "CQPlotEditWidget.h" #include "curve2dwidget.h" #include "HistoWidget.h" #ifdef COPASI_BANDED_GRAPH #include "BandedGraphWidget.h" #endif // COPASI_BANDED_GRAPH #include <copasi/plotUI/CQSpectogramWidget.h> #include "plotwindow.h" #include "copasi/plot/CPlotSpecification.h" #include "copasi/plot/COutputDefinitionVector.h" #include "copasi/report/CKeyFactory.h" #include "copasi/core/CDataArray.h" #include "copasi/UI/CCopasiPlotSelectionDialog.h" #include "copasi/UI/CCopasiPlot2YSelectionDialog.h" #include "copasi/model/CMetabNameInterface.h" #include "copasi/CopasiDataModel/CDataModel.h" #include "copasi/UI/DataModelGUI.h" #include "copasi/UI/qtUtilities.h" #include "copasi/core/CRootContainer.h" #include "copasi/UI/CCopasiSelectionDialog.h" #include "copasi/UI/CQMultipleSelectionDialog.h" #include <QListWidgetItem> #include <QtCore/QList> #include <QtCore/QMap> #include <QMessageBox> //----------------------------------------------------------------------------- /* * Constructs a PlotWidget1 as a child of 'parent', with the * name 'name' and widget flags set to 'f'. */ CQPlotSubwidget::CQPlotSubwidget(QWidget* parent, const char* name, Qt::WindowFlags fl) : CopasiWidget(parent, name, fl) , mpCurveWidget(NULL) , mpHistoWidget(NULL) #ifdef COPASI_BANDED_GRAPH , mpBandedGraphWidget(NULL) #endif , mpSpectogramWidget(NULL) , mLastItem(NULL) { setupUi(this); mpCurveWidget = new Curve2DWidget(this); mpStack->addWidget(mpCurveWidget); mpHistoWidget = new HistoWidget(this); mpStack->addWidget(mpHistoWidget); #ifdef COPASI_BANDED_GRAPH QPushButton *buttonBandedGraph = new QPushButton(this); buttonBandedGraph->setText("New &Banded Graph"); layoutCurves->insertWidget(5, buttonBandedGraph); connect(buttonBandedGraph, SIGNAL(clicked()), this, SLOT(addBandedGraphSlot())); mpBandedGraphWidget = new BandedGraphWidget(this); mpStack->addWidget(mpBandedGraphWidget); #endif // COPASI_BANDED_GRAPH mpSpectogramWidget = new CQSpectogramWidget(this); mpStack->addWidget(mpSpectogramWidget); auto it = CTaskEnum::TaskName.begin(); for (; it != CTaskEnum::TaskName.end(); ++it) { mTaskNames << FROM_UTF8(*it); } } CPlotItem *CQPlotSubwidget::updateItem(CPlotItem *item) { if (item == NULL || !mpStack->isEnabled()) return NULL; QWidget *widget = mpStack->currentWidget(); CQPlotEditWidget *current = dynamic_cast<CQPlotEditWidget *>(widget); if (current != NULL) { if (!current->SaveToCurveSpec(item, mLastItem)) { return NULL; } } return item; } void CQPlotSubwidget::storeChanges() { if (mLastSelection.size() == 0) return; if (mLastSelection.size() == 1) { QString oldName = mLastSelection[0]->text(); CPlotItem *item = mList[oldName]; updateItem(item); <|fim▁hole|> QString newName = FROM_UTF8(item->getTitle()); if (oldName != newName) { mList.remove(oldName); mLastSelection[0]->setText(newName); mList.insert(newName, item); } } else { 
QList<QListWidgetItem *>::const_iterator it; for (it = mLastSelection.begin(); it != mLastSelection.end(); ++it) { // This suffices since editing the name/title is blocked. updateItem(mList[(*it)->text()]); } } } //----------------------------------------------------------------------------- /* * Destroys the object and frees any allocated resources */ CQPlotSubwidget::~CQPlotSubwidget() {} //----------------------------------------------------------------------------- //the slot... void CQPlotSubwidget::addCurveSlot() { if (mType == CPlotItem::plot2d) addCurve2D(); } #ifdef COPASI_BANDED_GRAPH void CQPlotSubwidget::addBandedGraphSlot() { if (mType == CPlotItem::plot2d) addBandedGraph(); } #endif // COPASI_BANDED_GRAPH void CQPlotSubwidget::addSpectrumSlot() { if (mType == CPlotItem::plot2d) addSpectrum(); } void CQPlotSubwidget::addHistoSlot() { if (mType == CPlotItem::plot2d) addHisto1D(); } int CQPlotSubwidget::getCurrentIndex() { return mpListPlotItems->currentRow(); } void CQPlotSubwidget::deleteCurves() { mLastSelection.clear(); for (int i = mpListPlotItems->count(); i >= 0; --i) { deleteCurve(i); } mList.clear(); mpListPlotItems->clear(); mLastSelection.clear(); } int CQPlotSubwidget::getRow(QListWidgetItem *item) { for (int i = 0; i < mpListPlotItems->count(); ++i) { if (mpListPlotItems->item(i)->text() == item->text()) return i; } return -1; } void CQPlotSubwidget::deleteCurve(QListWidgetItem *item) { if (item == NULL) return; delete mList[item->text()]; mList.remove(item->text()); mLastSelection.removeOne(item); delete mpListPlotItems->takeItem(getRow(item)); } void CQPlotSubwidget::deleteCurve(int index) { QListWidgetItem *item = mpListPlotItems->item(index); deleteCurve(item); } void CQPlotSubwidget::setCurrentIndex(int index) { if (index < 0) { mpListPlotItems->clearSelection(); return; } if (mpListPlotItems->count() == 0) return; if (index < 0 && mpListPlotItems->count() > 0) index = 0; if (index >= mpListPlotItems->count()) index = mpListPlotItems->count() - 1; mpListPlotItems->setCurrentRow(index, QItemSelectionModel::Select); } void CQPlotSubwidget::addPlotItem(CPlotItem *item) { QString title = FROM_UTF8(item->getTitle()); int count = 0; mpListPlotItems->clearSelection(); while (mList.contains(title)) { title = (FROM_UTF8(item->getTitle()) + " %1").arg(++count); } item->setTitle(TO_UTF8(title)); QListWidgetItem *listItem = new QListWidgetItem(FROM_UTF8(item->getTitle())); mpListPlotItems->addItem(listItem); mList.insert(FROM_UTF8(item->getTitle()), new CPlotItem(*item, NO_PARENT)); mpListPlotItems->setCurrentRow(mpListPlotItems->count() - 1); } CQPlotEditWidget *CQPlotSubwidget::selectControl(CPlotItem::Type type) { switch (type) { #ifdef COPASI_BANDED_GRAPH case CPlotItem::bandedGraph: { mpStack->setCurrentIndex(2); return mpBandedGraphWidget; } #endif case CPlotItem::histoItem1d: { mpStack->setCurrentIndex(1); return mpHistoWidget; } case CPlotItem::curve2d: { mpStack->setCurrentIndex(0); return mpCurveWidget; } case CPlotItem::spectogram: { #ifdef COPASI_BANDED_GRAPH mpStack->setCurrentIndex(3); #else mpStack->setCurrentIndex(2); #endif return mpSpectogramWidget; } default: return NULL; } } void CQPlotSubwidget::selectPlotItem(CPlotItem *item) { CQPlotEditWidget *current = static_cast< CQPlotEditWidget * >(mpStack->currentWidget()); if (current == NULL) return; if (item != NULL) { current = selectControl(item->getType()); } if (item == NULL) { mpStack->setEnabled(false); } current->setModel(mpDataModel->getModel()); current->LoadFromCurveSpec(item); pdelete(mLastItem); 
if (item != NULL) mLastItem = new CPlotItem(*item, NO_PARENT); } void CQPlotSubwidget::addCurveTab(const std::string &title, const CPlotDataChannelSpec &x, const CPlotDataChannelSpec &y) { CPlotItem *item = new CPlotItem(title, NULL, CPlotItem::curve2d); item->addChannel(x); item->addChannel(y); addPlotItem(item); } void chooseAxisFromSelection( std::vector<const CDataObject *> &vector1, std::vector<const CDataObject *> &vector2, std::vector< const CDataObject * > & vector3, std::vector<CCommonName> &objects1, std::vector<CCommonName> &objects2, std::vector< CCommonName > & objects3, std::map< std::string, std::string > & mapCNToDisplayName) { size_t i; std::vector<CCommonName>::const_iterator sit; const CDataArray *pArray; // 1. enable user to choose either a cell, an entire row/column, or even the objects themselves, if they are arrays. // 2. translate to CNs and remove duplicates // x-axis is set for single cell selection std::string cn; for (i = 0; i < vector1.size(); i++) { if (vector1[i]) // the object is not empty { // is it an array annotation? if ((pArray = dynamic_cast<const CDataArray *>(vector1[i]))) { // second argument is true as only single cell here is allowed. In this case we //can assume that the size of the return vector is 1. const CDataObject *pObject = CCopasiSelectionDialog::chooseCellMatrix(pArray, true, true, "X axis: ")[0]; if (!pObject) continue; cn = pObject->getCN(); mapCNToDisplayName[cn] = pObject->getObjectDisplayName(); } else { cn = vector1[i]->getCN(); mapCNToDisplayName[cn] = vector1[i]->getObjectDisplayName(); } // check whether cn is already on objects1 for (sit = objects1.begin(); sit != objects1.end(); ++sit) { if (*sit == cn) break; } // if not exist, input cn into objects1 if (sit == objects1.end()) { objects1.push_back(cn); } } } for (i = 0; i < vector2.size(); i++) { if (vector2[i]) { // is it an array annotation? if ((pArray = dynamic_cast<const CDataArray *>(vector2[i]))) { // second argument is set false for multi selection std::vector<const CDataObject *> vvv = CCopasiSelectionDialog::chooseCellMatrix(pArray, false, true, "Y axis: "); std::vector<const CDataObject *>::const_iterator it; for (it = vvv.begin(); it != vvv.end(); ++it) { if (!*it) continue; cn = (*it)->getCN(); //check if the CN already is in the list, if not add it. for (sit = objects2.begin(); sit != objects2.end(); ++sit) if (*sit == cn) break; mapCNToDisplayName[cn] = (*it)->getObjectDisplayName(); if (sit == objects2.end()) objects2.push_back(cn); } } else { cn = vector2[i]->getCN(); mapCNToDisplayName[cn] = vector2[i]->getObjectDisplayName(); //check if the CN already is in the list, if not add it. for (sit = objects2.begin(); sit != objects2.end(); ++sit) if (*sit == cn) break; if (sit == objects2.end()) objects2.push_back(cn); } } } for (i = 0; i < vector3.size(); i++) { if (vector3[i]) { // is it an array annotation? if ((pArray = dynamic_cast< const CDataArray * >(vector3[i]))) { // second argument is set false for multi selection std::vector< const CDataObject * > vvv = CCopasiSelectionDialog::chooseCellMatrix(pArray, false, true, "Y axis 2: "); std::vector< const CDataObject * >::const_iterator it; for (it = vvv.begin(); it != vvv.end(); ++it) { if (!*it) continue; cn = (*it)->getCN(); //check if the CN already is in the list, if not add it. 
for (sit = objects3.begin(); sit != objects3.end(); ++sit)
                if (*sit == cn) break;

              mapCNToDisplayName[cn] = (*it)->getObjectDisplayName();

              if (sit == objects3.end())
                objects3.push_back(cn);
            }
        }
      else
        {
          cn = vector3[i]->getCN();

          //check if the CN already is in the list, if not add it.
          for (sit = objects3.begin(); sit != objects3.end(); ++sit)
            if (*sit == cn) break;

          if (sit == objects3.end())
            objects3.push_back(cn);
        }
    }
}

void chooseAxisFromSelection(
  std::vector< const CDataObject * > & vector1,
  std::vector< const CDataObject * > & vector2,
  std::vector< CCommonName > & objects1,
  std::vector< CCommonName > & objects2,
  std::map< std::string, std::string > & mapCNToDisplayName)
{
  std::vector< const CDataObject * > vector3;
  std::vector< CCommonName > objects3;
  chooseAxisFromSelection(vector1, vector2, vector3, objects1, objects2, objects3, mapCNToDisplayName);
}

void CQPlotSubwidget::addCurve2D()
{
  CCopasiPlotSelectionDialog *pBrowser = new CCopasiPlotSelectionDialog();
  pBrowser->setWindowTitle("New Curve");

  std::vector< const CDataObject * > vector1;
  std::vector< const CDataObject * > vector2;
  pBrowser->setOutputVectors(&vector1, &vector2);
  assert(mpDataModel != NULL);
  pBrowser->setModel(mpDataModel->getModel(), CQSimpleSelectionTree::NumericValues);

  if (pBrowser->exec() == QDialog::Rejected)
    {
      return;
    }

  //this assumes that the vector is empty if nothing was chosen
  if (vector1.size() == 0 || vector2.size() == 0)
    {
      return;
    }

  std::vector< CCommonName > objects1, objects2;
  std::map<std::string, std::string> mapCNToDisplayName;
  size_t i;
  chooseAxisFromSelection(vector1, vector2, objects1, objects2, mapCNToDisplayName);

  if (objects1.size() == 1)
    {
      for (i = 0; i < objects2.size(); ++i)
        {
          addCurveTab(mapCNToDisplayName[objects2[i]] + "|" + mapCNToDisplayName[objects1[0]],
                      objects1[0], objects2[i]);
        }
    }
  else if (objects2.size() == 1)
    {
      for (i = 0; i < objects1.size(); ++i)
        {
          addCurveTab(mapCNToDisplayName[objects2[0]] + "|" + mapCNToDisplayName[objects1[i]],
                      objects1[i], objects2[0]);
        }
    }
  else
    {
      size_t imax;

      if (objects1.size() > objects2.size())
        imax = objects2.size();
      else
        imax = objects1.size();

      for (i = 0; i < imax; ++i)
        {
          addCurveTab(mapCNToDisplayName[objects2[i]] + "|" + mapCNToDisplayName[objects1[i]],
                      objects1[i], objects2[i]);
        }
    }
}

void CQPlotSubwidget::addSpectrum()
{
  CCopasiPlot2YSelectionDialog * pBrowser = new CCopasiPlot2YSelectionDialog();
  pBrowser->setWindowTitle("New Contour");
  pBrowser->setY2Label("Z-Axis");
  pBrowser->setSingleSelectionY(true);
  pBrowser->setSingleSelectionY2(true);

  std::vector< const CDataObject * > vector1;
  std::vector< const CDataObject * > vector2;
  std::vector< const CDataObject * > vector3;
  pBrowser->setOutputVectors(&vector1, &vector2, &vector3);
  assert(mpDataModel != NULL);
  pBrowser->setModel(mpDataModel->getModel(), CQSimpleSelectionTree::NumericValues);

  if (pBrowser->exec() == QDialog::Rejected)
    {
      return;
    }

  //this assumes that the vector is empty if nothing was chosen
  if (vector1.size() == 0 || vector2.size() == 0 || vector3.size() == 0)
    {
      return;
    }

  std::vector< CCommonName > objects1, objects2, objects3;
  std::map<std::string, std::string> mapCNToDisplayName;
  size_t i;
  chooseAxisFromSelection(vector1, vector2, vector3, objects1, objects2, objects3, mapCNToDisplayName);

  if (objects1.size() == 1)
    {
      size_t imax;

      if (objects3.size() > objects2.size())
        imax = objects2.size();
      else
        imax = objects3.size();

      for (i = 0; i < imax; ++i)
        {
          addSpectrumTab(mapCNToDisplayName[objects2[i]] + "|" +
mapCNToDisplayName[objects1[0]], objects1[0], objects2[i], objects3[i]); } } else if (objects2.size() == 1) { size_t imax; if (objects3.size() > objects1.size()) imax = objects1.size(); else imax = objects3.size(); for (i = 0; i < imax; ++i) { addSpectrumTab(mapCNToDisplayName[objects2[0]] + "|" + mapCNToDisplayName[objects1[i]], objects1[i], objects2[0], objects3[i]); } } else { size_t imax; if (objects1.size() > objects2.size()) imax = objects2.size(); else imax = objects1.size(); if (imax > objects3.size()) imax = objects3.size(); for (i = 0; i < imax; ++i) { addSpectrumTab(mapCNToDisplayName[objects2[i]] + "|" + mapCNToDisplayName[objects1[i]], objects1[i], objects2[i], objects3[i]); } } } #ifdef COPASI_BANDED_GRAPH void CQPlotSubwidget::addBandedGraphTab(const std::string &title, const CPlotDataChannelSpec &x, const CPlotDataChannelSpec &yone, const CPlotDataChannelSpec &ytwo) { CPlotItem *item = new CPlotItem(title, NULL, CPlotItem::bandedGraph); item->addChannel(x); item->addChannel(yone); item->addChannel(ytwo); addPlotItem(item); } void CQPlotSubwidget::addBandedGraph() { CCopasiPlot2YSelectionDialog * pBrowser = new CCopasiPlot2YSelectionDialog(); pBrowser->setWindowTitle("New Banded Graph"); pBrowser->setSingleSelectionY(true); pBrowser->setSingleSelectionY2(true); std::vector< const CDataObject * > vector1; std::vector< const CDataObject * > vector2; std::vector< const CDataObject * > vector3; pBrowser->setOutputVectors(&vector1, &vector2, &vector3); assert(mpDataModel != NULL); pBrowser->setModel(mpDataModel->getModel(), CQSimpleSelectionTree::NumericValues); if (pBrowser->exec() == QDialog::Rejected) { return; } //this assumes that the vector is empty if nothing was chosen if (vector1.size() == 0 || vector2.size() == 0 || vector3.size() == 0) { return; } std::vector<CCommonName> objects1, objects2, objects3; std::map<std::string, std::string> mapCNToDisplayName; size_t i; chooseAxisFromSelection(vector1, vector2, vector3, objects1, objects2, objects3, mapCNToDisplayName); if (objects1.size() == 1) { size_t imax; if (objects3.size() > objects2.size()) imax = objects2.size(); else imax = objects3.size(); for (i = 0; i < imax; ++i) { addBandedGraphTab(mapCNToDisplayName[objects2[i]] + "|" + mapCNToDisplayName[objects1[0]], objects1[0], objects2[i], objects3[i]); } } else if (objects2.size() == 1) { size_t imax; if (objects3.size() > objects1.size()) imax = objects1.size(); else imax = objects3.size(); for (i = 0; i < imax; ++i) { addBandedGraphTab(mapCNToDisplayName[objects2[0]] + "|" + mapCNToDisplayName[objects1[i]], objects1[i], objects2[0], objects3[i]); } } else { size_t imax; if (objects1.size() > objects2.size()) imax = objects2.size(); else imax = objects1.size(); if (imax > objects3.size()) imax = objects3.size(); for (i = 0; i < imax; ++i) { addBandedGraphTab(mapCNToDisplayName[objects2[i]] + "|" + mapCNToDisplayName[objects1[i]], objects1[i], objects2[i], objects3[i]); } } } #endif // COPASI_BANDED_GRAPH void CQPlotSubwidget::addSpectrumTab(const std::string &title, const CPlotDataChannelSpec &x, const CPlotDataChannelSpec &yone, const CPlotDataChannelSpec &ytwo) { CPlotItem *item = new CPlotItem(title, NULL, CPlotItem::spectogram); item->addChannel(x); item->addChannel(yone); item->addChannel(ytwo); addPlotItem(item); } void CQPlotSubwidget::addHisto1DTab(const std::string &title, const CPlotDataChannelSpec &x, const C_FLOAT64 &incr) { CPlotItem *item = new CPlotItem(title, NULL, CPlotItem::histoItem1d); item->addChannel(x); item->setValue("increment", incr); 
addPlotItem(item); } void CQPlotSubwidget::addHisto1D() { addHisto1DTab("Histogram", CPlotDataChannelSpec(CCommonName("")), 1.0); } void CQPlotSubwidget::createHistograms(std::vector<const CDataObject * >objects, const C_FLOAT64 &incr) { C_INT32 storeTab = getCurrentIndex(); size_t i; for (i = 1; i < objects.size(); ++i) { if (objects[i]) addHisto1DTab("Histogram: " + objects[i]->getObjectDisplayName(), CPlotDataChannelSpec(objects[i]->getCN()), incr); // lineEditTitle->setText("Histogram: " + FROM_UTF8(mpObjectX->getObjectDisplayName())); } setCurrentIndex(storeTab); } //----------------------------------------------------------------------------- void CQPlotSubwidget::removeCurve() { QList<QListWidgetItem *> selection = mpListPlotItems->selectedItems(); if (selection.size() == 0) return; if (QMessageBox::question(this, "Delete Curves", QString("Do you really want to delete the %1 selected curve(s)?").arg(selection.size()), QMessageBox::Yes, QMessageBox::No | QMessageBox::Default) == QMessageBox::Yes) { for (int index = selection.size() - 1; index >= 0; --index) { deleteCurve(selection.at(index)); } mLastSelection.clear(); } } //----------------------------------------------------------------------------- void CQPlotSubwidget::commitPlot() { saveToPlotSpec(); loadFromPlotSpec(dynamic_cast<CPlotSpecification *>(mpObject)); } //----------------------------------------------------------------------------- void CQPlotSubwidget::deletePlot() { size_t Index, Size; assert(mpDataModel != NULL); if (!mpDataModel->getModel()) return; CPlotSpecification *pspec = dynamic_cast< CPlotSpecification * >(mpObject); if (!pspec) return; Index = mpDataModel->getPlotDefinitionList()->CDataVector<CPlotSpecification>::getIndex(pspec); mpDataModel->getPlotDefinitionList()->CDataVector<CPlotSpecification>::remove(Index); std::string deletedObjectCN = mObjectCN; Size = mpDataModel->getPlotDefinitionList()->size(); if (Size > 0) enter(mpDataModel->getPlotDefinitionList()->operator[](std::min(Index, Size - 1)).getCN()); else enter(std::string()); //ListViews:: protectedNotify(ListViews::ObjectType::PLOT, ListViews::DELETE, deletedObjectCN); } //----------------------------------------------------------------------------- void CQPlotSubwidget::copyPlot() { leaveProtected(); CDataModel *pDataModel = mpObject->getObjectDataModel(); if (pDataModel == NULL) return; CPlotSpecification *pPl = new CPlotSpecification(*dynamic_cast<CPlotSpecification *>(mpObject), NO_PARENT); std::string baseName = pPl->getObjectName() + "_copy"; std::string name = baseName; int i = 1; while (pDataModel->getPlotDefinitionList()->getIndex(name) != C_INVALID_INDEX) { i++; name = baseName + TO_UTF8(QString::number(i)); } pPl->setObjectName(name); pDataModel->getPlotDefinitionList()->add(pPl, true); std::string cn = pPl->CCopasiParameter::getCN(); protectedNotify(ListViews::ObjectType::PLOT, ListViews::ADD, cn); enter(cn); mpListView->switchToOtherWidget(ListViews::WidgetType::PlotDetail, cn); } //----------------------------------------------------------------------------- void CQPlotSubwidget::addPlot() { leaveProtected(); CDataModel *pDataModel = mpObject->getObjectDataModel(); if (pDataModel == NULL) return; std::string name = "plot_"; int i = 0; CPlotSpecification *pPl = NULL; name += TO_UTF8(QString::number(i)); while (!(pPl = pDataModel->getPlotDefinitionList()->createPlotSpec(name, CPlotItem::plot2d))) { i++; name = "plot_"; name += TO_UTF8(QString::number(i)); } std::string cn = pPl->CCopasiParameter::getCN(); 
protectedNotify(ListViews::ObjectType::PLOT, ListViews::ADD, cn); enter(cn); mpListView->switchToOtherWidget(ListViews::WidgetType::PlotDetail, cn); } //----------------------------------------------------------------------------- void CQPlotSubwidget::resetPlot() { loadFromPlotSpec(dynamic_cast<CPlotSpecification *>(mpObject)); } #include <QInputDialog> void CQPlotSubwidget::selectTaskTypes() { CQMultipleSelectionDialog* dlg = new CQMultipleSelectionDialog(this); dlg->setWindowTitle("Select Tasks"); dlg->setMinimumHeight(400); dlg->setSelectionList(mTaskNames); QStringList currentSelection; if (!mTaskTypes.empty()) { std::istringstream ss(mTaskTypes); std::string token; while (std::getline(ss, token, ',')) { while (token[0] == ' ') // remove leading spaces token.erase(0, 1); currentSelection << FROM_UTF8(token); } } dlg->setCurrentSelection(currentSelection); if (dlg->exec() != QDialog::Accepted) return; const QStringList& selection = dlg->getSelection(); std::stringstream str; if (!selection.empty()) { auto it = selection.begin(); str << TO_UTF8(*it++); for (; it != selection.end(); ++it) { str << ", "; str << TO_UTF8(*it); } } mTaskTypes = str.str(); chkTaskTypes->setChecked(mTaskTypes.empty()); txtTaskTypes->setText(FROM_UTF8(mTaskTypes)); } void CQPlotSubwidget::allTaskTypesClicked() { if (!mTaskTypes.empty()) { mTaskTypes.clear(); txtTaskTypes->clear(); } else { selectTaskTypes(); } } //----------------------------------------------------------------------------- bool CQPlotSubwidget::loadFromPlotSpec(const CPlotSpecification *pspec) { if (!pspec) return false; mLastSelection.clear(); //title titleLineEdit->setText(pspec->getTitle().c_str()); //active? activeCheckBox->setChecked(pspec->isActive()); //type mType = pspec->getType(); mTaskTypes = pspec->getTaskTypes(); txtTaskTypes->setText(FROM_UTF8(mTaskTypes)); chkTaskTypes->setChecked(mTaskTypes.empty()); switch (mType) { #ifdef COPASI_BANDED_GRAPH case CPlotItem::bandedGraph: #endif // COPASI_BANDED_GRAPH case CPlotItem::spectogram: case CPlotItem::plot2d: checkLogX->setChecked(pspec->isLogX()); checkLogY->setChecked(pspec->isLogY()); break; default: return false; } //clear tabWidget deleteCurves(); mpListPlotItems->clearSelection(); //reconstruct tabWidget from curve specs CDataVector<CPlotItem>::const_iterator it = pspec->getItems().begin(); CDataVector<CPlotItem>::const_iterator end = pspec->getItems().end(); QStringList PlotItems; for (; it != end; ++it) { QString title = FROM_UTF8(it->getTitle()); PlotItems.append(title); CPlotItem *pItem = new CPlotItem(*it, NO_PARENT); // The copy has the same parent as the original, i.e., it has been added to the plot specification. const_cast< CPlotSpecification * >(pspec)->getItems().remove(pItem); mList.insert(title, pItem); } mpListPlotItems->addItems(PlotItems); if (pspec->getItems().size() > 0) { mpListPlotItems->setCurrentRow(0, QItemSelectionModel::Select); } else { // We need to clear the current items display selectPlotItem(NULL); } return true; //TODO really check } bool CQPlotSubwidget::saveToPlotSpec() { CPlotSpecification *pspec = dynamic_cast< CPlotSpecification * >(mpObject); if (!pspec) return true; pspec->cleanup(); //title if (pspec->getTitle() != TO_UTF8(titleLineEdit->text())) { pspec->setTitle(TO_UTF8(titleLineEdit->text())); protectedNotify(ListViews::ObjectType::PLOT, ListViews::RENAME, mObjectCN); } //active? 
pspec->setActive(activeCheckBox->isChecked()); //scales pspec->setLogX(checkLogX->isChecked()); pspec->setLogY(checkLogY->isChecked()); // task types pspec->setTaskTypes(mTaskTypes); //curves CPlotItem *item; storeChanges(); for (int i = 0, imax = mpListPlotItems->count(); i < imax; ++i) { CPlotItem *currentItem = mList[mpListPlotItems->item(i)->text()]; if (currentItem == NULL) continue; item = new CPlotItem(*currentItem, NO_PARENT); pspec->getItems().add(item, true); } // :TODO Bug 322: This should only be called when actual changes have been saved. // However we do not check whether the scan item are changed we delete all // and create them new. if (true) { if (mpDataModel != NULL) { mpDataModel->changed(); } // mChanged = false; } return true; } //----------------------------------------------------------------------------- //TODO: save a copy! bool CQPlotSubwidget::enterProtected() { CPlotSpecification *pspec = dynamic_cast< CPlotSpecification * >(mpObject); if (!pspec) { mpListView->switchToOtherWidget(ListViews::WidgetType::Plots, std::string()); return false; } return loadFromPlotSpec(pspec); } bool CQPlotSubwidget::areOfSameType(QList<QListWidgetItem *> &items) { if (items.size() <= 1) return true; QList<CPlotItem::Type> listOfUniqueTypes; QList<QListWidgetItem *>::const_iterator it = items.begin(); while (it != items.end()) { QString currentText = (*it)->text(); CPlotItem *item = mList[currentText]; if (!listOfUniqueTypes.contains(item->getType())) listOfUniqueTypes.append(item->getType()); ++it; } return listOfUniqueTypes.size() == 1; } void CQPlotSubwidget::itemSelectionChanged() { storeChanges(); QList<QListWidgetItem *> current = mpListPlotItems->selectedItems(); if (current.size() == 0) { mpStack->setEnabled(false); } else if (current.size() == 1) { mpStack->setEnabled(true); selectPlotItem(mList[current[0]->text()]); (static_cast<CQPlotEditWidget *>(mpStack->currentWidget()))->setMultipleEditMode(false); } else { if (!areOfSameType(current)) { mpStack->setEnabled(false); } else { mpStack->setEnabled(true); selectPlotItem(mList[current[0]->text()]); (static_cast<CQPlotEditWidget *>(mpStack->currentWidget()))->setMultipleEditMode(true); } } mLastSelection = current; } //----------------------------------------------------------------------------- bool CQPlotSubwidget::updateProtected(ListViews::ObjectType objectType, ListViews::Action action, const CCommonName & cn) { if (mIgnoreUpdates || isHidden()) return true; switch (objectType) { //TODO: check list: case ListViews::ObjectType::MODEL: switch (action) { case ListViews::DELETE: case ListViews::ADD: mpObject = NULL; mObjectCN.clear(); return enterProtected(); break; default: break; } break; case ListViews::ObjectType::PLOT: if (cn == mObjectCN) { switch (action) { case ListViews::DELETE: mpObject = NULL; mObjectCN.clear(); return enterProtected(); break; case ListViews::CHANGE: return enterProtected(); break; default: break; } } break; default: break; } return true; } //----------------------------------------------------------------------------- bool CQPlotSubwidget::leaveProtected() { return saveToPlotSpec(); }<|fim▁end|>
if (item == NULL) return;
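storeChanges() in the C++ sample above keeps plot items in a map keyed by title and re-keys an entry when an edit renames it, while addPlotItem() disambiguates duplicate titles with a numeric suffix. A minimal Python sketch of that bookkeeping, with illustrative names and plain dicts in place of the Qt types:

# Title-keyed item store with rename-safe updates, mirroring the
# single-selection branch of storeChanges() and the title
# disambiguation in addPlotItem().
class PlotItemStore:
    def __init__(self):
        self.items = {}  # title -> item dict

    def add(self, item):
        # disambiguate duplicate titles with " 1", " 2", ...
        title, n = item["title"], 0
        while title in self.items:
            n += 1
            title = "%s %d" % (item["title"], n)
        item["title"] = title
        self.items[title] = item
        return title

    def store_changes(self, old_title, edited):
        # update in place, then re-key if the edit renamed the item
        item = self.items[old_title]
        item.update(edited)
        new_title = item["title"]
        if new_title != old_title:
            del self.items[old_title]
            self.items[new_title] = item

store = PlotItemStore()
store.add({"title": "curve"})
store.add({"title": "curve"})            # stored as "curve 1"
store.store_changes("curve", {"title": "renamed"})
print(sorted(store.items))               # ['curve 1', 'renamed']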
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from models import Event

from django.views.generic import DetailView, ListView


class EventListView(ListView):
    template_name = 'agenda/event_list.html'
    queryset = Event.objects.upcoming()
    paginate_by = 20


class EventArchiveView(EventListView):<|fim▁hole|>
    queryset = Event.objects.past()


class EventDetailView(DetailView):
    model = Event
    template_name = 'agenda/event_detail.html'<|fim▁end|>
<|file_name|>platform_utils.py<|end_file_name|><|fim▁begin|>import ctypes from contextlib import contextmanager import errno import logging import os import platform import pwd import grp import subprocess from .exceptions import CommandFailed _logger = logging.getLogger(__name__) def get_current_user_shell(): return pwd.getpwuid(os.getuid()).pw_shell def execute_command_assert_success(cmd, **kw): returned = execute_command(cmd, **kw) if returned.returncode != 0: raise CommandFailed("Command {0!r} failed with exit code {1}".format(cmd, returned.returncode)) return returned def execute_command(cmd, unsudo=False, **kw): if unsudo: cmd = _get_unsudo_command(cmd) _logger.debug("Running %r (%s)", cmd, kw) returned = subprocess.Popen(cmd, shell=True, **kw) returned.wait() _logger.debug("%r finished with exit code %s", cmd, returned.returncode) return returned def _get_unsudo_command(cmd): sudo_uid = get_sudo_uid() sudo_gid = get_sudo_gid() if not sudo_uid and not sudo_gid: return cmd prefix = "sudo " if sudo_uid is not None: prefix += "-u \\#{0} ".format(sudo_uid) if sudo_gid is not None: prefix += "-g \\#{0} ".format(sudo_gid) return prefix + cmd def get_sudo_uid(): return _int_if_not_none(os.environ.get("SUDO_UID")) def get_sudo_gid(): return _int_if_not_none(os.environ.get("SUDO_GID")) def get_sudo_groups(): sudo_uid = get_sudo_uid() if sudo_uid is None: return None return get_groups_by_uid(sudo_uid) def get_groups_by_uid(uid): try: username = pwd.getpwuid(uid).pw_name except KeyError: _logger.warning("Failed to get pwd information for uid %s", uid, exc_info=True) return [] gids = [g.gr_gid for g in grp.getgrall() if username in g.gr_mem] return gids if platform.system() == "Linux": CLONE_NEWNS = 131072 _libc = ctypes.CDLL("libc.so.6") def unshare_mounts(): return_value = _libc.unshare(CLONE_NEWNS) if 0 != return_value: errno_val = ctypes.get_errno() raise OSError("unshare() called failed (errno={0} ({1}))".format( errno_val, errno.errorcode.get(errno_val, "?") )) else: def unshare_mounts(): raise NotImplementedError("Only supported on Linux") @contextmanager def unsudo_context(): old_uid = os.geteuid() old_gid = os.getegid() sudo_uid = get_sudo_uid() sudo_gid = get_sudo_gid() if sudo_gid is not None: _logger.debug("Changing gid to %s", sudo_gid) os.setegid(sudo_gid) if sudo_uid is not None: _logger.debug("Changing uid to %s", sudo_uid) os.seteuid(sudo_uid) try: yield finally: os.seteuid(old_uid) os.setegid(old_gid)<|fim▁hole|> value = int(value) return value<|fim▁end|>
def _int_if_not_none(value): if value is not None:
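unsudo_context() in the sample above drops the effective uid/gid to the invoking sudo user and restores them afterwards; the order matters because the gid must change while the process is still privileged, and the uid must come back first so the process regains permission to restore the gid. A condensed Python sketch of the same pattern, standalone and without the sample's logging:

# Temporarily run as the SUDO_UID/SUDO_GID user; a no-op outside sudo.
import os
from contextlib import contextmanager

@contextmanager
def unsudo():
    old_uid, old_gid = os.geteuid(), os.getegid()
    sudo_uid = os.environ.get("SUDO_UID")
    sudo_gid = os.environ.get("SUDO_GID")
    if sudo_gid is not None:
        os.setegid(int(sudo_gid))   # gid first, while still privileged
    if sudo_uid is not None:
        os.seteuid(int(sudo_uid))
    try:
        yield
    finally:
        os.seteuid(old_uid)         # uid first, or setegid may be denied
        os.setegid(old_gid)

if __name__ == "__main__":
    with unsudo():
        print("effective uid inside context:", os.geteuid())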
<|file_name|>load_data.py<|end_file_name|><|fim▁begin|>import os
import pickle
import csv
import pandas as pd
import math
from functools import lru_cache, reduce
from collections import defaultdict

USE_ROME_SLICING_DATASET = False  # Rome slicing dataset is not ready yet

if USE_ROME_SLICING_DATASET:
    OGR_ROME_FILE = "rome_slicing_dataset/ogr_rome_mapping.csv"
    ROME_FILE = "rome_slicing_dataset/rome_labels.csv"
    ROME_NAF_FILE = "rome_slicing_dataset/rome_naf_mapping.csv"
else:
    OGR_ROME_FILE = "ogr_rome_mapping.csv"
    ROME_FILE = "rome_labels.csv"
    ROME_NAF_FILE = "rome_naf_mapping.csv"


def load_file(func, filename):
    full_filename = os.path.join(os.path.dirname(
        os.path.realpath(__file__)), "data/%s" % filename)
    return func(full_filename)


def load_pickle_file(filename):
    def f(full_filename):
        # pickle files must be opened in binary mode under Python 3
        return pickle.load(open(full_filename, "rb"))
    return load_file(f, filename)


def load_pd_dataframe(filename, delimiter=',', dtype=None):
    def f(full_filename):
        return pd.read_csv(open(full_filename, "r"), sep=delimiter, dtype=dtype)
    return load_file(f, filename)


def load_csv_file(filename, delimiter='|'):
    def f(full_filename):
        csv_file = open(full_filename, 'r')
        reader = csv.reader(csv_file, delimiter=delimiter)
        return reader

    reader = load_file(f, filename)
    rows = []
    len_previous_row = None

    for row in reader:
        if len_previous_row:
            # at least second line of CSV file
            if len(row) == 0:
                # skip empty rows
                continue
            elif len(row) != len_previous_row:
                raise IndexError(
                    "found row with abnormal number of fields : %s" % row)
            rows.append(row)
        else:
            # first line of CSV file: headers should be ignored
            pass

        len_previous_row = len(row)

    return rows


def load_rows_as_set(rows):
    for row in rows:
        if len(row) != 1:
            raise IndexError("wrong number of fields")
    return set([row[0] for row in rows])


def load_rows_as_dict(rows):
    d = {}
    for row in rows:
        if len(row) != 2:
            raise IndexError("wrong number of fields")
        if row[0] in d:
            raise ValueError("duplicate key")
        d[row[0]] = row[1]
    return d


def load_rows_as_dict_of_dict(rows):
    d = {}
    for row in rows:
        if len(row) != 3:
            raise IndexError("wrong number of fields")
        # values of 3 fields
        f1 = row[0]
        f2 = row[1]
        f3 = row[2]
        if f1 in d:<|fim▁hole|>
                d[f1][f2] = f3
        else:
            d[f1] = {f2: f3}
    return d


@lru_cache(maxsize=None)
def load_related_rome_areas():
    """
    Build a dict with department code (code insee) as keys and area code as values (bassins d'emploi).
    Used for PSE study in 2021.
    """
    rows = load_csv_file("lbb-pse_bassin-emploi_code-insee.csv", delimiter=',')
    return reduce(reduceRelateRomesAreas, rows, {})


def reduceRelateRomesAreas(aggr, row):
    [code_insee, code_area] = row
    aggr[code_insee] = code_area
    return aggr


@lru_cache(maxsize=None)
def load_related_rome():
    """
    Build a dict with area code (bassin d'emploi) as keys.
    The values are dict with rome code as keys and a list of related rome codes as values.
    Each related rome is a dict with `rome` and `score` properties.
    Used for PSE study.
""" rows = load_csv_file("lbb-pse_bassin-emploi_rome-connexe.csv", delimiter=',') return reduce(reduceRelateRomes, rows, {}) def reduceRelateRomes(aggr, row): [code_area, rome, rome_connexe, score] = row entry_code_area = aggr.get(code_area, {}) entry_rome = entry_code_area.get(rome, []) entry_rome.append({'rome': rome_connexe, 'score': float(score)}) entry_code_area[rome] = entry_rome aggr[code_area] = entry_code_area return aggr @lru_cache(maxsize=None) def load_city_codes(): rows = load_csv_file("city_codes.csv") commune_id_to_commune_name = load_rows_as_dict(rows) return commune_id_to_commune_name @lru_cache(maxsize=None) def load_contact_modes(): """ Use comma delimiter instead of pipe so that it is recognized by github and can easily be edited online by the intrapreneurs. """ rows = load_csv_file("contact_modes.csv", delimiter=',') naf_prefix_to_rome_to_contact_mode = load_rows_as_dict_of_dict(rows) return naf_prefix_to_rome_to_contact_mode @lru_cache(maxsize=None) def load_ogr_labels(): rows = load_csv_file("ogr_labels.csv") ogr_to_label = load_rows_as_dict(rows) return ogr_to_label @lru_cache(maxsize=None) def load_groupements_employeurs(): rows = load_csv_file("groupements_employeurs.csv") sirets = load_rows_as_set(rows) return sirets @lru_cache(maxsize=None) def load_ogr_rome_mapping(): rows = load_csv_file(OGR_ROME_FILE) OGR_COLUMN = 0 ROME_COLUMN = 1 ogr_to_rome = {} for row in rows: ogr = row[OGR_COLUMN] if ogr not in load_ogr_labels(): raise ValueError("missing label for OGR %s" % ogr) rome = row[ROME_COLUMN] if rome not in load_rome_labels(): raise ValueError("missing label for ROME %s" % rome) ogr_to_rome[ogr] = rome return ogr_to_rome @lru_cache(maxsize=None) def load_rome_labels(): rows = load_csv_file(ROME_FILE) rome_to_label = load_rows_as_dict(rows) return rome_to_label @lru_cache(maxsize=None) def load_naf_labels(): rows = load_csv_file("naf_labels.csv") naf_to_label = load_rows_as_dict(rows) return naf_to_label @lru_cache(maxsize=None) def load_rome_naf_mapping(): return load_csv_file(ROME_NAF_FILE, delimiter=',') @lru_cache(maxsize=None) def load_metiers_tension(): csv_metiers_tension = load_csv_file("metiers_tension.csv", ',') rome_to_tension = defaultdict(int) for row in csv_metiers_tension: tension_pct = row[2] rome_code = row[3] # FIXME : remove rows where tension is #N/A in the dataset, to remove this ugly check ? if tension_pct != '#N/A': tension_pct = float(tension_pct) if 0 <= tension_pct <= 100: # As a single ROME can have multiple tensions, # It has been decided to take the higher tension for a rome rome_to_tension[rome_code] = max(rome_to_tension[rome_code], tension_pct) else: raise ValueError return rome_to_tension #Used for PSE study, it returns a list of SIRET that must not b be seen on LBB @lru_cache(maxsize=None) def load_siret_to_remove(): rows = load_csv_file("untreated_BB.csv", ',') sirets_to_remove = load_rows_as_set(rows) return sirets_to_remove #Used by importer job to extract etablissement @lru_cache(maxsize=None) def load_effectif_labels(): ''' Dataframe to load look like this. 
code label 0 0 0-0 1 1 1-2 2 2 3-5 3 3 6-9 4 11 10-19 5 12 20-49 6 21 50-99 7 22 100-199 8 31 200-249 9 32 250-499 10 41 500-999 11 42 1000-1999 12 51 2000-4999 13 52 5000-9999 14 53 10000+ ''' def create_column(row, which='start_effectif'): ''' From the label, we want to create a start and end column to delimitate the interval We'll be able to use it to simply determine from a number of employees in an office, in which category the office belongs to ''' #we split on the label which is from type "10-19" OR 10000+ splitted_label = row['label'].split('-') if len(splitted_label) == 1: #10000+ value = math.inf if which == 'end_effectif' else 10000 else: if which == 'start_effectif': value = int(splitted_label[0]) else: value = int(splitted_label[1]) return value df = load_pd_dataframe("helpers/effectif_labels.csv", ',', dtype={'code':str}) df['start_effectif'] = df.apply(lambda row: create_column(row,'start_effectif'), axis=1) df['end_effectif'] = df.apply(lambda row: create_column(row,'end_effectif'), axis=1) return df OGR_ROME_CODES = load_ogr_rome_mapping() ROME_CODES = list(OGR_ROME_CODES.values())<|fim▁end|>
if f2 in d[f1]: raise ValueError("duplicate key") else:
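The completion above fills the duplicate-key guard of the nested-dict loader. Further down the same file, load_effectif_labels() documents turning labels such as "10-19" and the open-ended "10000+" into numeric intervals for bucketing headcounts. A pure-Python sketch of that parsing, without pandas and with a label list abridged from the docstring:

# "10-19" -> (10, 19); "10000+" -> (10000, inf). A headcount is then
# bucketed with a simple range test, like the start/end columns the
# pandas version derives.
import math

def parse_effectif_label(label):
    parts = label.split("-")
    if len(parts) == 1:                  # open-ended bucket like "10000+"
        return int(label.rstrip("+")), math.inf
    return int(parts[0]), int(parts[1])

def bucket_for(headcount, labels):
    for label in labels:
        start, end = parse_effectif_label(label)
        if start <= headcount <= end:
            return label
    return None

labels = ["0-0", "1-2", "3-5", "6-9", "10-19", "20-49", "10000+"]
print(bucket_for(35, labels))     # 20-49
print(bucket_for(12000, labels))  # 10000+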
<|file_name|>debug.cpp<|end_file_name|><|fim▁begin|>/* * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. * */ #include "precompiled.hpp" #include "classfile/systemDictionary.hpp" #include "code/codeCache.hpp" #include "code/icBuffer.hpp" #include "code/nmethod.hpp" #include "code/vtableStubs.hpp" #include "compiler/compileBroker.hpp" #include "compiler/disassembler.hpp" #include "gc_implementation/shared/markSweep.hpp" #include "gc_interface/collectedHeap.hpp" #include "interpreter/bytecodeHistogram.hpp" #include "interpreter/interpreter.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/oop.inline.hpp" #include "prims/privilegedStack.hpp" #include "runtime/arguments.hpp" #include "runtime/frame.hpp" #include "runtime/java.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubCodeGenerator.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/thread.inline.hpp" #include "runtime/vframe.hpp" #include "services/heapDumper.hpp" #include "utilities/defaultStream.hpp" #include "utilities/events.hpp" #include "utilities/top.hpp" #include "utilities/vmError.hpp" #ifdef TARGET_OS_FAMILY_linux # include "os_linux.inline.hpp" #endif #ifdef TARGET_OS_FAMILY_solaris # include "os_solaris.inline.hpp" #endif #ifdef TARGET_OS_FAMILY_windows # include "os_windows.inline.hpp" #endif #ifdef TARGET_OS_FAMILY_bsd # include "os_bsd.inline.hpp" #endif #ifndef ASSERT # ifdef _DEBUG // NOTE: don't turn the lines below into a comment -- if you're getting // a compile error here, change the settings to define ASSERT ASSERT should be defined when _DEBUG is defined. It is not intended to be used for debugging functions that do not slow down the system too much and thus can be left in optimized code. On the other hand, the code should not be included in a production version. # endif // _DEBUG #endif // ASSERT #ifdef _DEBUG # ifndef ASSERT configuration error: ASSERT must be defined in debug version # endif // ASSERT #endif // _DEBUG #ifdef PRODUCT # if -defined _DEBUG || -defined ASSERT configuration error: ASSERT et al. must not be defined in PRODUCT version # endif #endif // PRODUCT PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC FormatBufferResource::FormatBufferResource(const char * format, ...) : FormatBufferBase((char*)resource_allocate_bytes(RES_BUFSZ)) { va_list argp; va_start(argp, format); jio_vsnprintf(_buf, RES_BUFSZ, format, argp); va_end(argp); } ATTRIBUTE_PRINTF(1, 2) void warning(const char* format, ...) 
{ if (PrintWarnings) { FILE* const err = defaultStream::error_stream(); jio_fprintf(err, "%s warning: ", VM_Version::vm_name()); va_list ap; va_start(ap, format); vfprintf(err, format, ap); va_end(ap); fputc('\n', err); } if (BreakAtWarning) BREAKPOINT; } #ifndef PRODUCT #define is_token_break(ch) (isspace(ch) || (ch) == ',') static const char* last_file_name = NULL; static int last_line_no = -1; // assert/guarantee/... may happen very early during VM initialization. // Don't rely on anything that is initialized by Threads::create_vm(). For // example, don't use tty. bool error_is_suppressed(const char* file_name, int line_no) { // The following 1-element cache requires that passed-in // file names are always only constant literals. if (file_name == last_file_name && line_no == last_line_no) return true; int file_name_len = (int)strlen(file_name); char separator = os::file_separator()[0]; const char* base_name = strrchr(file_name, separator); if (base_name == NULL) base_name = file_name; // scan the SuppressErrorAt option const char* cp = SuppressErrorAt; for (;;) { const char* sfile;<|fim▁hole|> if ((*cp) == '\0') break; sfile = cp; while ((*cp) != '\0' && !is_token_break(*cp) && (*cp) != ':') cp++; sfile_len = cp - sfile; if ((*cp) == ':') cp++; sline = 0; while ((*cp) != '\0' && isdigit(*cp)) { sline *= 10; sline += (*cp) - '0'; cp++; } // "file:line!" means the assert suppression is not silent noisy = ((*cp) == '!'); while ((*cp) != '\0' && !is_token_break(*cp)) cp++; // match the line if (sline != 0) { if (sline != line_no) continue; } // match the file if (sfile_len > 0) { const char* look = file_name; const char* look_max = file_name + file_name_len - sfile_len; const char* foundp; bool match = false; while (!match && (foundp = strchr(look, sfile[0])) != NULL && foundp <= look_max) { match = true; for (int i = 1; i < sfile_len; i++) { if (sfile[i] != foundp[i]) { match = false; break; } } look = foundp + 1; } if (!match) continue; } // got a match! 
if (noisy) { fdStream out(defaultStream::output_fd()); out.print_raw("[error suppressed at "); out.print_raw(base_name); char buf[16]; jio_snprintf(buf, sizeof(buf), ":%d]", line_no); out.print_raw_cr(buf); } else { // update 1-element cache for fast silent matches last_file_name = file_name; last_line_no = line_no; } return true; } if (!is_error_reported()) { // print a friendly hint: fdStream out(defaultStream::output_fd()); out.print_raw_cr("# To suppress the following error report, specify this argument"); out.print_raw ("# after -XX: or in .hotspotrc: SuppressErrorAt="); out.print_raw (base_name); char buf[16]; jio_snprintf(buf, sizeof(buf), ":%d", line_no); out.print_raw_cr(buf); } return false; } #undef is_token_break #else // Place-holder for non-existent suppression check: #define error_is_suppressed(file_name, line_no) (false) #endif // !PRODUCT void report_vm_error(const char* file, int line, const char* error_msg, const char* detail_msg) { if (Debugging || error_is_suppressed(file, line)) return; Thread* const thread = ThreadLocalStorage::get_thread_slow(); VMError err(thread, file, line, error_msg, detail_msg); err.report_and_die(); } void report_fatal(const char* file, int line, const char* message) { report_vm_error(file, line, "fatal error", message); } void report_vm_out_of_memory(const char* file, int line, size_t size, VMErrorType vm_err_type, const char* message) { if (Debugging) return; Thread* thread = ThreadLocalStorage::get_thread_slow(); VMError(thread, file, line, size, vm_err_type, message).report_and_die(); // The UseOSErrorReporting option in report_and_die() may allow a return // to here. If so then we'll have to figure out how to handle it. guarantee(false, "report_and_die() should not return here"); } void report_should_not_call(const char* file, int line) { report_vm_error(file, line, "ShouldNotCall()"); } void report_should_not_reach_here(const char* file, int line) { report_vm_error(file, line, "ShouldNotReachHere()"); } void report_unimplemented(const char* file, int line) { report_vm_error(file, line, "Unimplemented()"); } void report_untested(const char* file, int line, const char* message) { #ifndef PRODUCT warning("Untested: %s in %s: %d\n", message, file, line); #endif // !PRODUCT } void report_out_of_shared_space(SharedSpaceType shared_space) { static const char* name[] = { "native memory for metadata", "shared read only space", "shared read write space", "shared miscellaneous data space", "shared miscellaneous code space" }; static const char* flag[] = { "Metaspace", "SharedReadOnlySize", "SharedReadWriteSize", "SharedMiscDataSize", "SharedMiscCodeSize" }; warning("\nThe %s is not large enough\n" "to preload requested classes. Use -XX:%s=<size>\n" "to increase the initial size of %s.\n", name[shared_space], flag[shared_space], name[shared_space]); exit(2); } void report_java_out_of_memory(const char* message) { static jint out_of_memory_reported = 0; // A number of threads may attempt to report OutOfMemoryError at around the // same time. To avoid dumping the heap or executing the data collection // commands multiple times we just do it once when the first threads reports // the error. 
if (Atomic::cmpxchg(1, &out_of_memory_reported, 0) == 0) { // create heap dump before OnOutOfMemoryError commands are executed if (HeapDumpOnOutOfMemoryError) { tty->print_cr("java.lang.OutOfMemoryError: %s", message); HeapDumper::dump_heap_from_oome(); } if (OnOutOfMemoryError && OnOutOfMemoryError[0]) { VMError err(message); err.report_java_out_of_memory(); } if (CrashOnOutOfMemoryError) { tty->print_cr("Aborting due to java.lang.OutOfMemoryError: %s", message); fatal(err_msg("OutOfMemory encountered: %s", message)); } if (ExitOnOutOfMemoryError) { tty->print_cr("Terminating due to java.lang.OutOfMemoryError: %s", message); exit(3); } } } static bool error_reported = false; // call this when the VM is dying--it might loosen some asserts void set_error_reported() { error_reported = true; } bool is_error_reported() { return error_reported; } #ifndef PRODUCT #include <signal.h> void test_error_handler() { uintx test_num = ErrorHandlerTest; if (test_num == 0) return; // If asserts are disabled, use the corresponding guarantee instead. size_t n = test_num; NOT_DEBUG(if (n <= 2) n += 2); const char* const str = "hello"; const size_t num = (size_t)os::vm_page_size(); const char* const eol = os::line_separator(); const char* const msg = "this message should be truncated during formatting"; char * const dataPtr = NULL; // bad data pointer const void (*funcPtr)(void) = (const void(*)()) 0xF; // bad function pointer // Keep this in sync with test/runtime/6888954/vmerrors.sh. switch (n) { case 1: assert(str == NULL, "expected null"); case 2: assert(num == 1023 && *str == 'X', err_msg("num=" SIZE_FORMAT " str=\"%s\"", num, str)); case 3: guarantee(str == NULL, "expected null"); case 4: guarantee(num == 1023 && *str == 'X', err_msg("num=" SIZE_FORMAT " str=\"%s\"", num, str)); case 5: fatal("expected null"); case 6: fatal(err_msg("num=" SIZE_FORMAT " str=\"%s\"", num, str)); case 7: fatal(err_msg("%s%s# %s%s# %s%s# %s%s# %s%s# " "%s%s# %s%s# %s%s# %s%s# %s%s# " "%s%s# %s%s# %s%s# %s%s# %s", msg, eol, msg, eol, msg, eol, msg, eol, msg, eol, msg, eol, msg, eol, msg, eol, msg, eol, msg, eol, msg, eol, msg, eol, msg, eol, msg, eol, msg)); case 8: vm_exit_out_of_memory(num, OOM_MALLOC_ERROR, "ChunkPool::allocate"); case 9: ShouldNotCallThis(); case 10: ShouldNotReachHere(); case 11: Unimplemented(); // There's no guarantee the bad data pointer will crash us // so "break" out to the ShouldNotReachHere(). case 12: *dataPtr = '\0'; break; // There's no guarantee the bad function pointer will crash us // so "break" out to the ShouldNotReachHere(). case 13: (*funcPtr)(); break; default: tty->print_cr("ERROR: %d: unexpected test_num value.", n); } ShouldNotReachHere(); } #endif // !PRODUCT // ------ helper functions for debugging go here ------------ // All debug entries should be wrapped with a stack allocated // Command object. It makes sure a resource mark is set and // flushes the logfile to prevent file sharing problems. 
class Command : public StackObj { private: ResourceMark rm; ResetNoHandleMark rnhm; HandleMark hm; bool debug_save; public: static int level; Command(const char* str) { debug_save = Debugging; Debugging = true; if (level++ > 0) return; tty->cr(); tty->print_cr("\"Executing %s\"", str); } ~Command() { tty->flush(); Debugging = debug_save; level--; } }; int Command::level = 0; #ifndef PRODUCT extern "C" void blob(CodeBlob* cb) { Command c("blob"); cb->print(); } extern "C" void dump_vtable(address p) { Command c("dump_vtable"); Klass* k = (Klass*)p; InstanceKlass::cast(k)->vtable()->print(); } extern "C" void nm(intptr_t p) { // Actually we look through all CodeBlobs (the nm name has been kept for backwards compatability) Command c("nm"); CodeBlob* cb = CodeCache::find_blob((address)p); if (cb == NULL) { tty->print_cr("NULL"); } else { cb->print(); } } extern "C" void disnm(intptr_t p) { Command c("disnm"); CodeBlob* cb = CodeCache::find_blob((address) p); nmethod* nm = cb->as_nmethod_or_null(); if (nm) { nm->print(); Disassembler::decode(nm); } else { cb->print(); Disassembler::decode(cb); } } extern "C" void printnm(intptr_t p) { char buffer[256]; sprintf(buffer, "printnm: " INTPTR_FORMAT, p); Command c(buffer); CodeBlob* cb = CodeCache::find_blob((address) p); if (cb->is_nmethod()) { nmethod* nm = (nmethod*)cb; nm->print_nmethod(true); } } extern "C" void universe() { Command c("universe"); Universe::print(); } extern "C" void verify() { // try to run a verify on the entire system // note: this may not be safe if we're not at a safepoint; for debugging, // this manipulates the safepoint settings to avoid assertion failures Command c("universe verify"); bool safe = SafepointSynchronize::is_at_safepoint(); if (!safe) { tty->print_cr("warning: not at safepoint -- verify may fail"); SafepointSynchronize::set_is_at_safepoint(); } // Ensure Eden top is correct before verification Universe::heap()->prepare_for_verify(); Universe::verify(); if (!safe) SafepointSynchronize::set_is_not_at_safepoint(); } extern "C" void pp(void* p) { Command c("pp"); FlagSetting fl(PrintVMMessages, true); FlagSetting f2(DisplayVMOutput, true); if (Universe::heap()->is_in(p)) { oop obj = oop(p); obj->print(); } else { tty->print(PTR_FORMAT, p); } } // pv: print vm-printable object extern "C" void pa(intptr_t p) { ((AllocatedObj*) p)->print(); } extern "C" void findpc(intptr_t x); #endif // !PRODUCT extern "C" void ps() { // print stack if (Thread::current() == NULL) return; Command c("ps"); // Prints the stack of the current Java thread JavaThread* p = JavaThread::active(); tty->print(" for thread: "); p->print(); tty->cr(); if (p->has_last_Java_frame()) { // If the last_Java_fp is set we are in C land and // can call the standard stack_trace function. 
#ifdef PRODUCT p->print_stack(); } else { tty->print_cr("Cannot find the last Java frame, printing stack disabled."); #else // !PRODUCT p->trace_stack(); } else { frame f = os::current_frame(); RegisterMap reg_map(p); f = f.sender(&reg_map); tty->print("(guessing starting frame id=%#p based on current fp)\n", f.id()); p->trace_stack_from(vframe::new_vframe(&f, &reg_map, p)); pd_ps(f); #endif // PRODUCT } } extern "C" void pfl() { // print frame layout Command c("pfl"); JavaThread* p = JavaThread::active(); tty->print(" for thread: "); p->print(); tty->cr(); if (p->has_last_Java_frame()) { p->print_frame_layout(); } } #ifndef PRODUCT extern "C" void psf() { // print stack frames { Command c("psf"); JavaThread* p = JavaThread::active(); tty->print(" for thread: "); p->print(); tty->cr(); if (p->has_last_Java_frame()) { p->trace_frames(); } } } extern "C" void threads() { Command c("threads"); Threads::print(false, true); } extern "C" void psd() { Command c("psd"); SystemDictionary::print(); } extern "C" void safepoints() { Command c("safepoints"); SafepointSynchronize::print_state(); } #endif // !PRODUCT extern "C" void pss() { // print all stacks if (Thread::current() == NULL) return; Command c("pss"); Threads::print(true, PRODUCT_ONLY(false) NOT_PRODUCT(true)); } #ifndef PRODUCT extern "C" void debug() { // to set things up for compiler debugging Command c("debug"); WizardMode = true; PrintVMMessages = PrintCompilation = true; PrintInlining = PrintAssembly = true; tty->flush(); } extern "C" void ndebug() { // undo debug() Command c("ndebug"); PrintCompilation = false; PrintInlining = PrintAssembly = false; tty->flush(); } extern "C" void flush() { Command c("flush"); tty->flush(); } extern "C" void events() { Command c("events"); Events::print(); } extern "C" Method* findm(intptr_t pc) { Command c("findm"); nmethod* nm = CodeCache::find_nmethod((address)pc); return (nm == NULL) ? (Method*)NULL : nm->method(); } extern "C" nmethod* findnm(intptr_t addr) { Command c("findnm"); return CodeCache::find_nmethod((address)addr); } // Another interface that isn't ambiguous in dbx. // Can we someday rename the other find to hsfind? extern "C" void hsfind(intptr_t x) { Command c("hsfind"); os::print_location(tty, x, false); } extern "C" void find(intptr_t x) { Command c("find"); os::print_location(tty, x, false); } extern "C" void findpc(intptr_t x) { Command c("findpc"); os::print_location(tty, x, true); } // Need method pointer to find bcp, when not in permgen. extern "C" void findbcp(intptr_t method, intptr_t bcp) { Command c("findbcp"); Method* mh = (Method*)method; if (!mh->is_native()) { tty->print_cr("bci_from(%p) = %d; print_codes():", mh, mh->bci_from(address(bcp))); mh->print_codes_on(tty); } } // int versions of all methods to avoid having to type type casts in the debugger void pp(intptr_t p) { pp((void*)p); } void pp(oop p) { pp((void*)p); } void help() { Command c("help"); tty->print_cr("basic"); tty->print_cr(" pp(void* p) - try to make sense of p"); tty->print_cr(" pv(intptr_t p)- ((PrintableResourceObj*) p)->print()"); tty->print_cr(" ps() - print current thread stack"); tty->print_cr(" pss() - print all thread stacks"); tty->print_cr(" pm(int pc) - print Method* given compiled PC"); tty->print_cr(" findm(intptr_t pc) - finds Method*"); tty->print_cr(" find(intptr_t x) - finds & prints nmethod/stub/bytecode/oop based on pointer into it"); tty->print_cr(" pns(void* sp, void* fp, void* pc) - print native (i.e. mixed) stack trace. 
E.g."); tty->print_cr(" pns($sp, $rbp, $pc) on Linux/amd64 and Solaris/amd64 or"); tty->print_cr(" pns($sp, $ebp, $pc) on Linux/x86 or"); tty->print_cr(" pns($sp, 0, $pc) on Linux/ppc64 or"); tty->print_cr(" pns($sp + 0x7ff, 0, $pc) on Solaris/SPARC"); tty->print_cr(" - in gdb do 'set overload-resolution off' before calling pns()"); tty->print_cr(" - in dbx do 'frame 1' before calling pns()"); tty->print_cr("misc."); tty->print_cr(" flush() - flushes the log file"); tty->print_cr(" events() - dump events from ring buffers"); tty->print_cr("compiler debugging"); tty->print_cr(" debug() - to set things up for compiler debugging"); tty->print_cr(" ndebug() - undo debug"); } #endif // !PRODUCT void print_native_stack(outputStream* st, frame fr, Thread* t, char* buf, int buf_size) { // see if it's a valid frame if (fr.pc()) { st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)"); int count = 0; while (count++ < StackPrintLimit) { fr.print_on_error(st, buf, buf_size); st->cr(); // Compiled code may use EBP register on x86 so it looks like // non-walkable C frame. Use frame.sender() for java frames. if (t && t->is_Java_thread()) { // Catch very first native frame by using stack address. // For JavaThread stack_base and stack_size should be set. if (!t->on_local_stack((address)(fr.real_fp() + 1))) { break; } if (fr.is_java_frame() || fr.is_native_frame() || fr.is_runtime_frame()) { RegisterMap map((JavaThread*)t, false); // No update fr = fr.sender(&map); } else { fr = os::get_sender_for_C_frame(&fr); } } else { // is_first_C_frame() does only simple checks for frame pointer, // it will pass if java compiled code has a pointer in EBP. if (os::is_first_C_frame(&fr)) break; fr = os::get_sender_for_C_frame(&fr); } } if (count > StackPrintLimit) { st->print_cr("...<more frames>..."); } st->cr(); } } #ifndef PRODUCT extern "C" void pns(void* sp, void* fp, void* pc) { // print native stack Command c("pns"); static char buf[O_BUFLEN]; Thread* t = ThreadLocalStorage::get_thread_slow(); // Call generic frame constructor (certain arguments may be ignored) frame fr(sp, fp, pc); print_native_stack(tty, fr, t, buf, sizeof(buf)); } #endif // !PRODUCT<|fim▁end|>
int sfile_len; int sline; bool noisy; while ((*cp) != '\0' && is_token_break(*cp)) cp++;
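The Command wrapper in the debug.cpp row above is a scope guard: save the debug flag on entry, announce the command, and always flush and restore on exit. A minimal sketch of the same save/flush/restore idea as a Python context manager; the DEBUGGING flag and command() helper are illustrative, not part of the HotSpot source:

import sys
from contextlib import contextmanager

DEBUGGING = False  # stand-in for HotSpot's global Debugging flag

@contextmanager
def command(name):
    # Save the flag, announce the command, and always flush/restore on exit.
    global DEBUGGING
    saved, DEBUGGING = DEBUGGING, True
    print('"Executing %s"' % name)
    try:
        yield
    finally:
        sys.stdout.flush()
        DEBUGGING = saved

with command("ps"):
    print("stack dump would go here")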
<|file_name|>index.js<|end_file_name|><|fim▁begin|>exports.view = function() { this.render(); }; exports.async = function() {<|fim▁hole|><|fim▁end|>
this.render(); };
<|file_name|>inner-module.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // -*- rust -*- mod inner { pub mod inner2 { pub fn hello() { debug!("hello, modular world"); } } pub fn hello() { inner2::hello(); } } <|fim▁hole|><|fim▁end|>
pub fn main() { inner::hello(); inner::inner2::hello(); }
<|file_name|>DbOpenHelper.java<|end_file_name|><|fim▁begin|>package com.soulkey.calltalent.db; import android.content.Context; import android.database.sqlite.SQLiteDatabase; import android.database.sqlite.SQLiteOpenHelper; import com.soulkey.calltalent.db.model.SettingModel; import com.soulkey.calltalent.db.populator.SettingPopulator; public final class DbOpenHelper extends SQLiteOpenHelper {<|fim▁hole|> public static DbOpenHelper getInstance(Context context) { if (null == instance) { instance = new DbOpenHelper(context); } return instance; } private DbOpenHelper(Context context) { super(context, DB_NAME, null, DB_VERSION); } @Override public void onCreate(SQLiteDatabase db) { db.execSQL(SettingModel.CREATE_TABLE); populateDb(db); } @Override public void onUpgrade(SQLiteDatabase db, int oldVersion, int newVersion) { } private void populateDb(SQLiteDatabase db) { SettingPopulator.populate(db); } }<|fim▁end|>
public static final String DB_NAME = "calltalent.db"; private static final int DB_VERSION = 1; private static DbOpenHelper instance;
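DbOpenHelper above is a lazily-created singleton that builds and seeds the schema on first open. A rough Python analogue of that pattern with sqlite3; the class, table, and ":memory:" default are illustrative, not from the source:

import sqlite3

class DbHelper:
    # Lazily-created singleton around a single connection; the schema and
    # seed step mirror the onCreate() -> populateDb() flow above.
    _instance = None

    @classmethod
    def get_instance(cls, db_name=":memory:"):
        if cls._instance is None:
            cls._instance = cls(db_name)
        return cls._instance

    def __init__(self, db_name):
        self.conn = sqlite3.connect(db_name)
        self.conn.execute(
            "CREATE TABLE IF NOT EXISTS setting (key TEXT PRIMARY KEY, value TEXT)"
        )
        self.conn.execute(
            "INSERT OR IGNORE INTO setting (key, value) VALUES ('initialized', '1')"
        )

db = DbHelper.get_instance()
assert db is DbHelper.get_instance()  # same object on every call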
<|file_name|>ILogicProcessConsole.java<|end_file_name|><|fim▁begin|><|fim▁hole|>package org.mo.logic.process; import org.mo.com.xml.IXmlObject; import org.mo.eng.store.IXmlConfigConsole; public interface ILogicProcessConsole extends IXmlConfigConsole<IXmlObject> { }<|fim▁end|>
<|file_name|>portability.js<|end_file_name|><|fim▁begin|>( function( $ ) {
	"use strict";

	// Extend etCorePortability since it is declared by localization.
	window.etCore.portability = $.extend( etCorePortability, {

		cancelled: false,

		boot: function( $instance ) {
			var $this = this;
			var $customizeHeader = $( '#customize-header-actions' );
			var $customizePortability = $( '.et-core-customize-controls-close' );

			// Moved portability button into customizer header
			if ( $customizeHeader.length && $customizePortability.length ) {
				$customizeHeader.append( $customizePortability );
			}

			$( '[data-et-core-portability]' ).each( function() {
				$this.listen( $( this ) );
			} );

			// Release unnecessary cache.
			etCorePortability = null;
		},

		listen: function( $el ) {
			var $this = this;

			$el.find( '[data-et-core-portability-export]' ).click( function( e ) {
				e.preventDefault();

				if ( ! $this.actionsDisabled() ) {
					$this.disableActions();
					$this.export();
				}
			} );

			$el.find( '.et-core-portability-export-form input[type="text"]' ).on( 'keydown', function( e ) {
				if ( 13 === e.keyCode ) {
					e.preventDefault();
					$el.find( '[data-et-core-portability-export]' ).click();
				}
			} );

			// Portability populate import.
			$el.find( '.et-core-portability-import-form input[type="file"]' ).on( 'change', function( e ) {
				$this.populateImport( $( this ).get( 0 ).files[0] );
			} );

			$el.find( '.et-core-portability-import' ).click( function( e ) {
				e.preventDefault();

				if ( ! $this.actionsDisabled() ) {
					$this.disableActions();
					$this.import();
				}
			} );

			// Trigger file window.
			$el.find( '.et-core-portability-import-form button' ).click( function( e ) {
				e.preventDefault();
				$this.instance( 'input[type="file"]' ).trigger( 'click' );
			} );

			// Cancel request.
			$el.find( '[data-et-core-portability-cancel]' ).click( function( e ) {
				e.preventDefault();
				$this.cancel();
			} )
		},

		validateImportFile: function( file, noOutput ) {
			if ( undefined !== file && 'undefined' != typeof file.name && 'undefined' != typeof file.type && 'json' == file.name.split( '.' ).slice( -1 )[0] ) {
				return true;
			}

			if ( ! noOutput ) {
				etCore.modalContent( '<p>' + this.text.invalideFile + '</p>', false, 3000, '#et-core-portability-import' );
			}

			this.enableActions();

			return false;
		},

		populateImport: function( file ) {
			if ( ! this.validateImportFile( file ) ) {
				return;
			}

			$( '.et-core-portability-import-placeholder' ).text( file.name );
		},

		import: function( noBackup ) {
			var $this = this,
				file = $this.instance( 'input[type="file"]' ).get( 0 ).files[0];

			if ( undefined === window.FormData ) {
				etCore.modalContent( '<p>' + this.text.browserSupport + '</p>', false, 3000, '#et-core-portability-import' );
				$this.enableActions();
				return;
			}

			if ( ! $this.validateImportFile( file ) ) {
				return;
			}

			$this.addProgressBar( $this.text.importing );

			// Export Backup if set.
			if ( $this.instance( '[name="et-core-portability-import-backup"]' ).is( ':checked' ) && ! noBackup ) {
				$this.export( true );

				$( $this ).on( 'exported', function() {
					$this.import( true );
				} );

				return;
			}

			$this.ajaxAction( {
				action: 'et_core_portability_import',
				file: file,
				nonce: $this.nonces.import
			}, function( response ) {
				etCore.modalContent( '<div class="et-core-loader et-core-loader-success"></div>', false, 3000, '#et-core-portability-import' );

				$this.toggleCancel();

				$( document ).delay( 3000 ).queue( function() {
					etCore.modalContent( '<div class="et-core-loader"></div>', false, false, '#et-core-portability-import' );

					$( this ).dequeue().delay( 2000 ).queue( function() {
						// Save post content for individual content.
if ( 'undefined' !== typeof response.data.postContent ) { var save = $( '#save-action #save-post' ); if ( save.length === 0 ) { save = $( '#publishing-action input[type="submit"]' ); } if ( 'undefined' !== typeof window.tinyMCE && window.tinyMCE.get( 'content' ) && ! window.tinyMCE.get( 'content' ).isHidden() ) { var editor = window.tinyMCE.get( 'content' ); editor.setContent( $.trim( response.data.postContent ), { format: 'html' } ); } else { $( '#content' ).val( $.trim( response.data.postContent ) ); } save.trigger( 'click' ); window.onbeforeunload = function() { $( 'body' ).fadeOut( 500 ); } } else { $( 'body' ).fadeOut( 500, function() { // Remove confirmation popup before relocation. $( window ).unbind( 'beforeunload' ); window.location = window.location.href.replace(/reset\=true\&|\&reset\=true/,''); } ) } } ); } ); }, true ); }, export: function( backup ) { var $this = this, progressBarMessages = backup ? $this.text.backuping : $this.text.exporting; $this.save( function() { var posts = {}, content = false; // Include selected posts. if ( $this.instance( '[name="et-core-portability-posts"]' ).is( ':checked' ) ) { $( '#posts-filter [name="post[]"]:checked:enabled' ).each( function() { posts[this.id] = this.value; } ); // do not proceed and display error message if no Items selected if ( $.isEmptyObject( posts ) ) { etCore.modalContent( '<div class="et-core-loader et-core-loader-fail"></div><h3>' + $this.text.noItemsSelected + '</h3>', false, true, '#' + $this.instance( '.ui-tabs-panel:visible' ).attr( 'id' ) ); $this.enableActions(); return; } } $this.addProgressBar( progressBarMessages ); // Get post layout. if ( 'undefined' !== typeof window.tinyMCE && window.tinyMCE.get( 'content' ) && ! window.tinyMCE.get( 'content' ).isHidden() ) { content = window.tinyMCE.get( 'content' ).getContent(); } else if ( $( 'textarea#content' ).length > 0 ) { content = $( 'textarea#content' ).val(); } if ( false !== content ) { content = content.replace( /^([^\[]*){1}/, '' ); content = content.replace( /([^\]]*)$/, '' ); } $this.ajaxAction( { action: 'et_core_portability_export', content: content, selection: $.isEmptyObject( posts ) ? false : JSON.stringify( posts ), nonce: $this.nonces.export }, function( response ) { var time = ' ' + new Date().toJSON().replace( 'T', ' ' ).replace( ':', 'h' ).substring( 0, 16 ), downloadURL = $this.instance( '[data-et-core-portability-export]' ).data( 'et-core-portability-export' ), query = { 'timestamp': response.data.timestamp, 'name': encodeURIComponent( $this.instance( '.et-core-portability-export-form input' ).val() + ( backup ? time : '' ) ), }; $.each( query, function( key, value ) { if ( value ) { downloadURL = downloadURL + '&' + key + '=' + value; } } ); // Remove confirmation popup before relocation. $( window ).unbind( 'beforeunload' ); window.location.assign( encodeURI( downloadURL ) ); if ( ! backup ) { etCore.modalContent( '<div class="et-core-loader et-core-loader-success"></div>', false, 3000, '#et-core-portability-export' ); $this.toggleCancel(); } $( $this ).trigger( 'exported' ); } ); } ); }, exportFB: function( exportUrl, postId, content, fileName, importFile, page ) { var $this = this; page = typeof page === 'undefined' ? 
1 : page;

			$.ajax( {
				type: 'POST',
				url: etCore.ajaxurl,
				dataType: 'json',
				data: {
					action: 'et_core_portability_export',
					content: content,
					timestamp: 0,
					nonce: $this.nonces.export,
					post: postId,
					context: 'et_builder',
					page: page,
				},
				success: function( response ) {
					var errorEvent = document.createEvent( 'Event' );
					errorEvent.initEvent( 'et_fb_layout_export_error', true, true );

					// The error is unknown but most of the time it would be caused by the server max size being exceeded.
					if ( 'string' === typeof response && '0' === response ) {
						window.et_fb_export_layout_message = $this.text.maxSizeExceeded;
						window.dispatchEvent( errorEvent );
						return;
					}
					// Memory size set on server is exhausted.
					else if ( 'string' === typeof response && response.toLowerCase().indexOf( 'memory size' ) >= 0 ) {
						window.et_fb_export_layout_message = $this.text.memoryExhausted;
						window.dispatchEvent( errorEvent );
						return;
					}
					// Paginate.
					else if ( 'undefined' !== typeof response.page ) {
						if ( $this.cancelled ) {
							return;
						}

						return $this.exportFB(exportUrl, postId, content, fileName, importFile, (page + 1));
					} else if ( 'undefined' !== typeof response.data && 'undefined' !== typeof response.data.message ) {
						window.et_fb_export_layout_message = $this.text[response.data.message];
						window.dispatchEvent( errorEvent );
						return;
					}

					var time = ' ' + new Date().toJSON().replace( 'T', ' ' ).replace( ':', 'h' ).substring( 0, 16 ),
						downloadURL = exportUrl,
						query = {
							'timestamp': response.data.timestamp,
							'name': '' !== fileName ? fileName : encodeURIComponent( time ),
						};

					$.each( query, function( key, value ) {
						if ( value ) {
							downloadURL = downloadURL + '&' + key + '=' + value;
						}
					} );

					// Remove confirmation popup before relocation.
					$( window ).unbind( 'beforeunload' );

					window.location.assign( encodeURI( downloadURL ) );

					// perform import if needed
					if ( typeof importFile !== 'undefined' ) {
						$this.importFB( importFile, postId );
					} else {
						var event = document.createEvent( 'Event' );
						event.initEvent( 'et_fb_layout_export_finished', true, true );

						// trigger event to communicate with FB
						window.dispatchEvent( event );
					}
				}<|fim▁hole|>
			var $this = this;
			var errorEvent = document.createEvent( 'Event' );

			window.et_fb_import_progress = 0;
			window.et_fb_import_estimation = 1;

			errorEvent.initEvent( 'et_fb_layout_import_error', true, true );

			if ( undefined === window.FormData ) {
				window.et_fb_import_layout_message = this.text.browserSupport;
				window.dispatchEvent( errorEvent );
				return;
			}

			if ( ! $this.validateImportFile( file, true ) ) {
				window.et_fb_import_layout_message = this.text.invalideFile;
				window.dispatchEvent( errorEvent );
				return;
			}

			var fileSize = Math.ceil( ( file.size / ( 1024 * 1024 ) ).toFixed( 2 ) ),
				formData = new FormData(),
				requestData = {
					action: 'et_core_portability_import',
					file: file,
					content: false,
					timestamp: 0,
					nonce: $this.nonces.import,
					post: postId,
					context: 'et_builder'
				};

			// Max size set on server is exceeded.
			if ( fileSize >= $this.postMaxSize || fileSize >= $this.uploadMaxSize ) {
				window.et_fb_import_layout_message = this.text.maxSizeExceeded;
				window.dispatchEvent( errorEvent );
				return;
			}

			$.each( requestData, function( name, value ) {
				formData.append( name, value);
			} );

			var importFBAjax = function( importData ) {
				$.ajax( {
					type: 'POST',
					url: etCore.ajaxurl,
					processData: false,
					contentType: false,
					data: formData,
					success: function( response ) {
						var event = document.createEvent( 'Event' );
						event.initEvent( 'et_fb_layout_import_in_progress', true, true );

						// Handle known error
						if ( !
response.success && 'undefined' !== typeof response.data && 'undefined' !== typeof response.data.message && 'undefined' !== typeof $this.text[ response.data.message ] ) {
							window.et_fb_import_layout_message = $this.text[ response.data.message ];
							window.dispatchEvent( errorEvent );
						}
						// The error is unknown but most of the time it would be caused by the server max size being exceeded.
						else if ( 'string' === typeof response && ('0' === response || '' === response) ) {
							window.et_fb_import_layout_message = $this.text.maxSizeExceeded;
							window.dispatchEvent( errorEvent );
							return;
						}
						// Memory size set on server is exhausted.
						else if ( 'string' === typeof response && response.toLowerCase().indexOf( 'memory size' ) >= 0 ) {
							window.et_fb_import_layout_message = $this.text.memoryExhausted;
							window.dispatchEvent( errorEvent );
							return;
						}
						// Pagination
						else if ( 'undefined' !== typeof response.page && 'undefined' !== typeof response.total_pages ) {
							// Update progress bar
							var progress = Math.ceil( ( response.page * 100 ) / response.total_pages );
							var estimation = Math.ceil( ( ( response.total_pages - response.page ) * 6 ) / 60 );

							window.et_fb_import_progress = progress;
							window.et_fb_import_estimation = estimation;

							// Import data
							var nextImportData = importData;
							nextImportData.append( 'page', ( parseInt(response.page) + 1 ) );
							nextImportData.append( 'timestamp', response.timestamp );
							nextImportData.append( 'file', null );

							importFBAjax( nextImportData );

							// trigger event to communicate with FB
							window.dispatchEvent( event );
						} else {
							// Update progress bar
							window.et_fb_import_progress = 100;
							window.et_fb_import_estimation = 0;

							// trigger event to communicate with FB
							window.dispatchEvent( event );

							// Allow some time for animations to animate
							setTimeout( function() {
								var event = document.createEvent( 'Event' );
								event.initEvent( 'et_fb_layout_import_finished', true, true );

								// save the data into global variable for later use in FB
								window.et_fb_import_layout_response = response;

								// trigger event to communicate with FB (again)
								window.dispatchEvent( event );
							}, 1300 );
						}
					}
				} );
			};

			importFBAjax(formData)
		},

		ajaxAction: function( data, callback, fileSupport ) {
			var $this = this;

			// Reset cancelled.
			this.cancelled = false;

			data = $.extend( {
				nonce: $this.nonce,
				file: null,
				content: false,
				timestamp: 0,
				post: $( '#post_ID' ).val(),
				context: $this.instance().data( 'et-core-portability' ),
				page: 1,
			}, data );

			var ajax = {
				type: 'POST',
				url: etCore.ajaxurl,
				data: data,
				success: function( response ) {
					// The error is unknown but most of the time it would be caused by the server max size being exceeded.
					if ( 'string' === typeof response && '0' === response ) {
						etCore.modalContent( '<p>' + $this.text.maxSizeExceeded + '</p>', false, true, '#' + $this.instance( '.ui-tabs-panel:visible' ).attr( 'id' ) );
						$this.enableActions();
						return;
					}
					// Memory size set on server is exhausted.
					else if ( 'string' === typeof response && response.toLowerCase().indexOf( 'memory size' ) >= 0 ) {
						etCore.modalContent( '<p>' + $this.text.memoryExhausted + '</p>', false, true, '#' + $this.instance( '.ui-tabs-panel:visible' ).attr( 'id' ) );
						$this.enableActions();
						return;
					}
					// Paginate.
else if ( 'undefined' !== typeof response.page ) {
						var progress = Math.ceil( ( response.page * 100 ) / response.total_pages );

						if ( $this.cancelled ) {
							return;
						}

						$this.toggleCancel( true );
						$this.ajaxAction( $.extend( data, {
							page: parseInt( response.page ) + 1,
							timestamp: response.timestamp,
							file: null,
						} ), callback, false );

						$this.instance( '.et-core-progress-bar' )
							.width( progress + '%' )
							.text( progress + '%' );
						$this.instance( '.et-core-progress-subtext span' ).text( Math.ceil( ( ( response.total_pages - response.page ) * 6 ) / 60 ) );

						return;
					} else if ( 'undefined' !== typeof response.data && 'undefined' !== typeof response.data.message ) {
						etCore.modalContent( '<p>' + $this.text[response.data.message] + '</p>', false, 3000, '#' + $this.instance( '.ui-tabs-panel:visible' ).attr( 'id' ) );
						$this.enableActions();
						return;
					}

					// Timestamp when AJAX response is received
					var ajax_returned_timestamp = new Date().getTime();

					// Animate Progress Bar
					var animateCoreProgressBar = function( DOMHighResTimeStamp ) {
						// Check has been performed for 3s and progress bar DOM still can't be found, consider it failed to avoid infinite loop
						var current_timestamp = new Date().getTime();
						if ((current_timestamp - ajax_returned_timestamp) > 3000) {
							$this.enableActions();
							etCore.modalContent( '<div class="et-core-loader et-core-loader-fail"></div>', false, 3000, '#' + $this.instance( '.ui-tabs-panel:visible' ).attr( 'id' ) );
							return;
						}

						// Check if core progress DOM exists
						if ($this.instance( '.et-core-progress' ).length ) {
							$this.instance( '.et-core-progress' )
								.removeClass( 'et-core-progress-striped' )
								.find( '.et-core-progress-bar' ).width( '100%' )
								.text( '100%' )
								.delay( 1000 )
								.queue( function() {
									$this.enableActions();

									if ( 'undefined' === typeof response.data || ( 'undefined' !== typeof response.data && ! response.data.timestamp ) ) {
										etCore.modalContent( '<div class="et-core-loader et-core-loader-fail"></div>', false, 3000, '#' + $this.instance( '.ui-tabs-panel:visible' ).attr( 'id' ) );
										return;
									}

									$( this ).dequeue();

									callback( response );
								} );
						} else {
							// Recheck on the next animation frame
							window.requestAnimationFrame(animateCoreProgressBar);
						}
					}

					animateCoreProgressBar();
				}
			};

			if ( fileSupport ) {
				var fileSize = Math.ceil( ( data.file.size / ( 1024 * 1024 ) ).toFixed( 2 ) ),
					formData = new FormData();

				// Max size set on server is exceeded.
				if ( fileSize >= $this.postMaxSize || fileSize >= $this.uploadMaxSize ) {
					etCore.modalContent( '<p>' + $this.text.maxSizeExceeded + '</p>', false, true, '#' + $this.instance( '.ui-tabs-panel:visible' ).attr( 'id' ) );
					$this.enableActions();
					return;
				}

				$.each( ajax.data, function( name, value ) {
					formData.append( name, value);
				} );

				ajax = $.extend( ajax, {
					data: formData,
					processData: false,
					contentType : false,
				} );
			}

			$.ajax( ajax );
		},

		// This function should be overridden for options portability type to make sure data are saved before exporting.
		save: function( callback ) {
			if ( 'undefined' !== typeof wp && 'undefined' !== typeof wp.customize ) {
				var saveCallback = function() {
					callback();

					wp.customize.unbind( 'saved', saveCallback );
				}

				$( '#save' ).click();
				wp.customize.bind( 'saved', saveCallback );
			} else {
				// Add a slight delay for animation purposes.
setTimeout( function() { callback(); }, 1000 ) } }, addProgressBar: function( message ) { etCore.modalContent( '<div class="et-core-progress et-core-progress-striped et-core-active"><div class="et-core-progress-bar" style="width: 10%;">1%</div><span class="et-core-progress-subtext">' + message + '</span></div>', false, false, '#' + this.instance( '.ui-tabs-panel:visible' ).attr( 'id' ) ); }, actionsDisabled: function() { if ( this.instance( '.et-core-modal-action' ).hasClass( 'et-core-disabled' ) ) { return true; } return false; }, disableActions: function() { this.instance( '.et-core-modal-action' ).addClass( 'et-core-disabled' ); }, enableActions: function() { this.instance( '.et-core-modal-action' ).removeClass( 'et-core-disabled' ); }, toggleCancel: function( cancel ) { var $target = this.instance( '.ui-tabs-panel:visible [data-et-core-portability-cancel]' ); if ( cancel && ! $target.is( ':visible' ) ) { $target.show().animate( { opacity: 1 }, 600 ); } else if ( ! cancel && $target.is( ':visible' ) ) { $target.animate( { opacity: 0 }, 600, function() { $( this ).hide(); } ); } }, cancel: function( cancel ) { this.cancelled = true; // Remove all temp files. Set a delay as temp files might still be in the process of being added. setTimeout( function() { $.ajax( { type: 'POST', url: etCore.ajaxurl, data: { nonce: this.nonces.cancel, context: this.instance().data( 'et-core-portability' ), action: 'et_core_portability_cancel', } } ); }.bind( this ), 3000 ); etCore.modalContent( '<div class="et-core-loader et-core-loader-success"></div>', false, 3000, '#' + this.instance( '.ui-tabs-panel:visible' ).attr( 'id' ) ); this.toggleCancel(); this.enableActions(); }, instance: function( element ) { return $( '.et-core-active[data-et-core-portability]' + ( element ? ' ' + element : '' ) ); }, } ); $( document ).ready( function() { window.etCore.portability.boot(); }); })( jQuery );<|fim▁end|>
} ); }, importFB: function( file, postId ) {
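The export/import handlers in the portability.js row above page through the server: each response carries page/total_pages until the final payload arrives. A hedged Python sketch of that client loop; run_paginated and post_page are stand-ins, not part of the source:

import math

def run_paginated(post_page, data, on_progress):
    # post_page stands in for the AJAX round trip: it returns either
    # {'page': n, 'total_pages': m} to request the next page, or the
    # final payload once the server is done.
    page = 1
    while True:
        response = post_page(dict(data, page=page))
        if "page" not in response:
            return response
        done = int(response["page"])
        total = int(response["total_pages"])
        on_progress(math.ceil(done * 100 / total))
        page = done + 1

# A fake two-page server run:
pages = iter([{"page": 1, "total_pages": 2}, {"result": "ok"}])
print(run_paginated(lambda _: next(pages), {}, print))  # 50, then {'result': 'ok'}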
<|file_name|>common_tags.py<|end_file_name|><|fim▁begin|># This file is part of FNP-Redakcja, licensed under GNU Affero GPLv3 or later.<|fim▁hole|># from django import template register = template.Library() @register.filter def username(user): return ("%s %s" % (user.first_name, user.last_name)).lstrip() or user.username<|fim▁end|>
# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
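The username filter above relies on a small trick: "%s %s" of two empty names lstrip()s down to an empty string, which is falsy, so `or` falls back to the login. A standalone check of that logic; FakeUser is a stand-in, not Django's User model:

class FakeUser:
    # Minimal stand-in exposing only the fields the filter reads.
    def __init__(self, first_name="", last_name="", username=""):
        self.first_name = first_name
        self.last_name = last_name
        self.username = username

def username(user):
    # Same logic as the filter: full name when present, else the login.
    return ("%s %s" % (user.first_name, user.last_name)).lstrip() or user.username

print(username(FakeUser("Jan", "Kowalski", "jank")))  # Jan Kowalski
print(username(FakeUser(username="jank")))            # jank (fallback)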
<|file_name|>Book.java<|end_file_name|><|fim▁begin|>package ch.bfh.swos.bookapp.jpa.model; import javax.persistence.*; import java.io.Serializable; import java.util.Date; import static javax.persistence.GenerationType.IDENTITY; import static javax.persistence.TemporalType.DATE; /** * Entity implementation class for Entity: Book * */ @Entity public class Book implements Serializable { @Id @GeneratedValue(strategy = IDENTITY) private Long id; private String bookId; private String title; @Temporal(DATE) private Date releaseDate; private static final long serialVersionUID = 1L; @ManyToOne private Author author; public Book() { super(); } public Long getId() { return this.id; } public void setId(Long id) { this.id = id; } public String getBookId() { return bookId; } public void setBookId(String bookId) { this.bookId = bookId; } public String getTitle() { return this.title; } public void setTitle(String title) { this.title = title; } public Date getReleaseDate() { return this.releaseDate;<|fim▁hole|> } public void setReleaseDate(Date releaseDate) { this.releaseDate = releaseDate; } public Author getAuthor() { return author; } public void setAuthor(Author author) { this.author = author; } }<|fim▁end|>
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>import re import ast from setuptools import setup, find_packages _version_re = re.compile(r'__version__\s+=\s+(.*)') with open('mycli/__init__.py', 'rb') as f: version = str(ast.literal_eval(_version_re.search( f.read().decode('utf-8')).group(1))) description = 'CLI for MySQL Database. With auto-completion and syntax highlighting.' setup( name='mycli', author='Amjith Ramanujam', author_email='amjith[dot]r[at]gmail.com', version=version, license='LICENSE.txt', url='http://mycli.net', packages=find_packages(), package_data={'mycli': ['myclirc', '../AUTHORS', '../SPONSORS']}, description=description, long_description=description, install_requires=[ 'click >= 4.1', 'Pygments >= 2.0', # Pygments has to be Capitalcased. WTF? 'prompt_toolkit==0.46', 'PyMySQL >= 0.6.6', 'sqlparse >= 0.1.16', 'configobj >= 5.0.6', ], entry_points=''' [console_scripts] mycli=mycli.main:cli ''', classifiers=[ 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: Unix', 'Programming Language :: Python', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: SQL', 'Topic :: Database', 'Topic :: Database :: Front-Ends', 'Topic :: Software Development',<|fim▁hole|><|fim▁end|>
'Topic :: Software Development :: Libraries :: Python Modules', ], )
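The setup.py above reads the version with a regex plus ast.literal_eval instead of importing the package, so installation never triggers the package's own imports (and their dependencies). The idiom in isolation; the source string here is illustrative, in setup.py it comes from reading mycli/__init__.py:

import ast
import re

_version_re = re.compile(r'__version__\s+=\s+(.*)')

# Parse the version assignment out of module source without importing it.
source = "__version__ = '1.6.0'\n"
version = str(ast.literal_eval(_version_re.search(source).group(1)))
print(version)  # 1.6.0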
<|file_name|>spark_kubernetes.py<|end_file_name|><|fim▁begin|># # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations<|fim▁hole|> from kubernetes import client from airflow.exceptions import AirflowException from airflow.providers.cncf.kubernetes.hooks.kubernetes import KubernetesHook from airflow.sensors.base import BaseSensorOperator class SparkKubernetesSensor(BaseSensorOperator): """ Checks sparkApplication object in kubernetes cluster: .. seealso:: For more detail about Spark Application Object have a look at the reference: https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/blob/v1beta2-1.1.0-2.4.5/docs/api-docs.md#sparkapplication :param application_name: spark Application resource name :type application_name: str :param namespace: the kubernetes namespace where the sparkApplication reside in :type namespace: str :param kubernetes_conn_id: The :ref:`kubernetes connection<howto/connection:kubernetes>` to Kubernetes cluster. :type kubernetes_conn_id: str :param attach_log: determines whether logs for driver pod should be appended to the sensor log :type attach_log: bool :param api_group: kubernetes api group of sparkApplication :type api_group: str :param api_version: kubernetes api version of sparkApplication :type api_version: str """ template_fields = ("application_name", "namespace") FAILURE_STATES = ("FAILED", "UNKNOWN") SUCCESS_STATES = ("COMPLETED",) def __init__( self, *, application_name: str, attach_log: bool = False, namespace: Optional[str] = None, kubernetes_conn_id: str = "kubernetes_default", api_group: str = 'sparkoperator.k8s.io', api_version: str = 'v1beta2', **kwargs, ) -> None: super().__init__(**kwargs) self.application_name = application_name self.attach_log = attach_log self.namespace = namespace self.kubernetes_conn_id = kubernetes_conn_id self.hook = KubernetesHook(conn_id=self.kubernetes_conn_id) self.api_group = api_group self.api_version = api_version def _log_driver(self, application_state: str, response: dict) -> None: if not self.attach_log: return status_info = response["status"] if "driverInfo" not in status_info: return driver_info = status_info["driverInfo"] if "podName" not in driver_info: return driver_pod_name = driver_info["podName"] namespace = response["metadata"]["namespace"] log_method = self.log.error if application_state in self.FAILURE_STATES else self.log.info try: log = "" for line in self.hook.get_pod_logs(driver_pod_name, namespace=namespace): log += line.decode() log_method(log) except client.rest.ApiException as e: self.log.warning( "Could not read logs for pod %s. 
It may have been disposed.\n" "Make sure timeToLiveSeconds is set on your SparkApplication spec.\n" "underlying exception: %s", driver_pod_name, e, ) def poke(self, context: Dict) -> bool: self.log.info("Poking: %s", self.application_name) response = self.hook.get_custom_object( group=self.api_group, version=self.api_version, plural="sparkapplications", name=self.application_name, namespace=self.namespace, ) try: application_state = response["status"]["applicationState"]["state"] except KeyError: return False if self.attach_log and application_state in self.FAILURE_STATES + self.SUCCESS_STATES: self._log_driver(application_state, response) if application_state in self.FAILURE_STATES: raise AirflowException(f"Spark application failed with state: {application_state}") elif application_state in self.SUCCESS_STATES: self.log.info("Spark application ended successfully") return True else: self.log.info("Spark application is still in state: %s", application_state) return False<|fim▁end|>
# under the License. from typing import Dict, Optional
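A sketch of wiring the sensor above into a DAG; the dag_id, task names, application name, and schedule are illustrative, not from the source, though the import path matches the module shown:

from datetime import datetime

from airflow import DAG
from airflow.providers.cncf.kubernetes.sensors.spark_kubernetes import SparkKubernetesSensor

with DAG(dag_id="spark_pi", start_date=datetime(2021, 1, 1), schedule_interval=None) as dag:
    monitor = SparkKubernetesSensor(
        task_id="monitor_spark_pi",
        application_name="spark-pi",          # name of the SparkApplication object
        namespace="default",
        kubernetes_conn_id="kubernetes_default",
        attach_log=True,                      # append driver pod logs on terminal states
    )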
<|file_name|>fake-raf.ts<|end_file_name|><|fim▁begin|>let original: (fn: FrameRequestCallback) => number; let requesters: any[] = []; function fakeRaf(fn: FrameRequestCallback): number { requesters.push(fn); return requesters.length; } function use() { original = window.requestAnimationFrame; window.requestAnimationFrame = fakeRaf; } function restore() { setTimeout(() => { window.requestAnimationFrame = original; }, 2000); }<|fim▁hole|> requesters = []; cur.forEach(function(f) { return f(16); }); } export default { use, restore, step, };<|fim▁end|>
function step() { let cur = requesters;
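fake-raf.ts above swaps requestAnimationFrame for a queue so tests can flush frame callbacks deterministically. The same stub-and-step idea outside the browser, as a minimal Python sketch; FakeScheduler and its method names are illustrative:

class FakeScheduler:
    # Queue callbacks instead of scheduling them, then flush on demand.
    def __init__(self):
        self.queue = []

    def schedule(self, fn):
        self.queue.append(fn)
        return len(self.queue)

    def step(self, frame_ms=16):
        # Swap the queue first so callbacks that re-schedule themselves
        # land in the next batch, exactly like step() above.
        pending, self.queue = self.queue, []
        for fn in pending:
            fn(frame_ms)

sched = FakeScheduler()
sched.schedule(lambda ts: print("tick at", ts))
sched.step()  # tick at 16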
<|file_name|>bitcoin_ca_ES.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="ca_ES" version="2.0"> <defaultcodec>UTF-8</defaultcodec> <context> <name>AboutDialog</name> <message> <location filename="../forms/aboutdialog.ui" line="+14"/> <source>About Dogecoin</source> <translation>Sobre Dogecoin</translation> </message> <message> <location line="+39"/> <source>&lt;b&gt;Dogecoin&lt;/b&gt; version</source> <translation>&lt;b&gt;Dogecoin&lt;/b&gt; versió</translation> </message> <message> <location line="+57"/> <source> This is experimental software. Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source> <translation>\n Aquest és software experimental.\n\n Distribuït sota llicència de software MIT/11, veure l&apos;arxiu COPYING o http://www.opensource.org/licenses/mit-license.php.\n\nAquest producte inclou software desarrollat pel projecte OpenSSL per a l&apos;ús de OppenSSL Toolkit (http://www.openssl.org/) i de softwqre criptogràfic escrit per l&apos;Eric Young ([email protected]) i software UPnP escrit per en Thomas Bernard.</translation> </message> <message> <location filename="../aboutdialog.cpp" line="+14"/> <source>Copyright</source> <translation>Copyright</translation> </message> <message> <location line="+0"/> <source>The Dogecoin developers</source> <translation type="unfinished"/> </message> </context> <context> <name>AddressBookPage</name> <message> <location filename="../forms/addressbookpage.ui" line="+14"/> <source>Address Book</source> <translation>Llibreta d&apos;adreces</translation> </message> <message> <location line="+19"/> <source>Double-click to edit address or label</source> <translation>Feu doble clic per editar l&apos;adreça o l&apos;etiqueta</translation> </message> <message> <location line="+27"/> <source>Create a new address</source> <translation>Crear una nova adreça</translation> </message> <message> <location line="+14"/> <source>Copy the currently selected address to the system clipboard</source> <translation>Copiar l&apos;adreça seleccionada al porta-retalls del sistema</translation> </message> <message> <location line="-11"/> <source>&amp;New Address</source> <translation>&amp;Nova adreça</translation> </message> <message> <location filename="../addressbookpage.cpp" line="+63"/> <source>These are your Dogecoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source> <translation>Aquestes són les teves adreces Dogecoin per a rebre pagaments. 
Pot interesar-te proveïr diferents adreces a cadascun dels enviadors així pots identificar qui et va pagant.</translation> </message> <message> <location filename="../forms/addressbookpage.ui" line="+14"/> <source>&amp;Copy Address</source> <translation>&amp;Copiar adreça</translation> </message> <message> <location line="+11"/> <source>Show &amp;QR Code</source> <translation>Mostrar codi &amp;QR</translation> </message> <message> <location line="+11"/> <source>Sign a message to prove you own a Dogecoin address</source> <translation>Signa el missatge per provar que ets propietari de l&apos;adreça Dogecoin</translation> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation>Signar &amp;Missatge</translation> </message> <message> <location line="+25"/> <source>Delete the currently selected address from the list</source> <translation>Esborrar l&apos;adreça sel·leccionada</translation> </message> <message> <location line="+27"/> <source>Export the data in the current tab to a file</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Export</source> <translation type="unfinished"/> </message> <message> <location line="-44"/> <source>Verify a message to ensure it was signed with a specified Dogecoin address</source> <translation>Verificar un missatge per asegurar-se que ha estat signat amb una adreça Dogecoin específica</translation> </message> <message> <location line="+3"/> <source>&amp;Verify Message</source> <translation>&amp;Verificar el missatge</translation> </message> <message> <location line="+14"/> <source>&amp;Delete</source> <translation>&amp;Esborrar</translation> </message> <message> <location filename="../addressbookpage.cpp" line="-5"/> <source>These are your Dogecoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source> <translation>Aquestes són la seva adreça de Dogecoin per enviar els pagaments. 
Sempre revisi la quantitat i l&apos;adreça del destinatari abans transferència de monedes.</translation> </message> <message> <location line="+13"/> <source>Copy &amp;Label</source> <translation>Copiar &amp;Etiqueta</translation> </message> <message> <location line="+1"/> <source>&amp;Edit</source> <translation>&amp;Editar</translation> </message> <message> <location line="+1"/> <source>Send &amp;Coins</source> <translation>Enviar &amp;Monedes</translation> </message> <message> <location line="+260"/> <source>Export Address Book Data</source> <translation>Exporta llibreta d&apos;adreces</translation> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>Arxiu de separació per comes (*.csv)</translation> </message> <message> <location line="+13"/> <source>Error exporting</source> <translation>Error en l&apos;exportació</translation> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation>No s&apos;ha pogut escriure a l&apos;arxiu %1.</translation> </message> </context> <context> <name>AddressTableModel</name> <message> <location filename="../addresstablemodel.cpp" line="+144"/> <source>Label</source> <translation>Etiqueta</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>Adreça</translation> </message> <message> <location line="+36"/> <source>(no label)</source> <translation>(sense etiqueta)</translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <location filename="../forms/askpassphrasedialog.ui" line="+26"/> <source>Passphrase Dialog</source> <translation>Dialeg de contrasenya</translation> </message> <message> <location line="+21"/> <source>Enter passphrase</source> <translation>Introdueix contrasenya</translation> </message> <message> <location line="+14"/> <source>New passphrase</source> <translation>Nova contrasenya</translation> </message> <message> <location line="+14"/> <source>Repeat new passphrase</source> <translation>Repeteix la nova contrasenya</translation> </message> <message> <location filename="../askpassphrasedialog.cpp" line="+33"/> <source>Enter the new passphrase to the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;10 or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation>Introdueixi la nova contrasenya al moneder&lt;br/&gt;Si us plau useu una contrasenya de &lt;b&gt;10 o més caracters aleatoris&lt;/b&gt;, o &lt;b&gt;vuit o més paraules&lt;/b&gt;.</translation> </message> <message> <location line="+1"/> <source>Encrypt wallet</source> <translation>Xifrar la cartera</translation> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation>Aquesta operació requereix la seva contrasenya del moneder per a desbloquejar-lo.</translation> </message> <message> <location line="+5"/> <source>Unlock wallet</source> <translation>Desbloqueja el moneder</translation> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation>Aquesta operació requereix la seva contrasenya del moneder per a desencriptar-lo.</translation> </message> <message> <location line="+5"/> <source>Decrypt wallet</source> <translation>Desencripta el moneder</translation> </message> <message> <location line="+3"/> <source>Change passphrase</source> <translation>Canviar la contrasenya</translation> </message> <message> <location 
line="+1"/> <source>Enter the old and new passphrase to the wallet.</source> <translation>Introdueixi tant l&apos;antiga com la nova contrasenya de moneder.</translation> </message> <message> <location line="+46"/> <source>Confirm wallet encryption</source> <translation>Confirmar l&apos;encriptació del moneder</translation> </message> <message> <location line="+1"/> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR DOGECOINS&lt;/b&gt;!</source> <translation>Advertència: Si encripteu el vostre moneder i perdeu la constrasenya, &lt;b&gt;PERDREU TOTS ELS VOSTRES DOGECOINS&lt;/b&gt;!</translation> </message> <message> <location line="+0"/> <source>Are you sure you wish to encrypt your wallet?</source> <translation>Esteu segur que voleu encriptar el vostre moneder?</translation> </message> <message> <location line="+15"/> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation>IMPORTANT: Tota copia de seguretat que hagis realitzat hauria de ser reemplaçada pel, recentment generat, arxiu encriptat del moneder.</translation> </message> <message> <location line="+100"/> <location line="+24"/> <source>Warning: The Caps Lock key is on!</source> <translation>Advertència: Les lletres majúscules estàn activades!</translation> </message> <message> <location line="-130"/> <location line="+58"/> <source>Wallet encrypted</source> <translation>Moneder encriptat</translation> </message> <message> <location line="-56"/> <source>Dogecoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your dogecoins from being stolen by malware infecting your computer.</source> <translation>Dogecoin es tancarà ara per acabar el procés d&apos;encriptació. Recorda que encriptar el teu moneder no protegeix completament els teus dogecoins de ser robades per programari maliciós instal·lat al teu ordinador.</translation> </message> <message> <location line="+13"/> <location line="+7"/> <location line="+42"/> <location line="+6"/> <source>Wallet encryption failed</source> <translation>L&apos;encriptació del moneder ha fallat</translation> </message> <message> <location line="-54"/> <source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source> <translation>L&apos;encriptació del moneder ha fallat per un error intern. 
El seu moneder no ha estat encriptat.</translation> </message> <message> <location line="+7"/> <location line="+48"/> <source>The supplied passphrases do not match.</source> <translation>La contrasenya introduïda no coincideix.</translation> </message> <message> <location line="-37"/> <source>Wallet unlock failed</source> <translation>El desbloqueig del moneder ha fallat</translation> </message> <message> <location line="+1"/> <location line="+11"/> <location line="+19"/> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation>La contrasenya introduïda per a desencriptar el moneder és incorrecte.</translation> </message> <message> <location line="-20"/> <source>Wallet decryption failed</source> <translation>La desencriptació del moneder ha fallat</translation> </message> <message> <location line="+14"/> <source>Wallet passphrase was successfully changed.</source> <translation>La contrasenya del moneder ha estat modificada correctament.</translation> </message> </context> <context> <name>BitcoinGUI</name> <message> <location filename="../bitcoingui.cpp" line="+233"/> <source>Sign &amp;message...</source> <translation>Signar &amp;missatge...</translation> </message> <message> <location line="+280"/> <source>Synchronizing with network...</source> <translation>Sincronitzant amb la xarxa ...</translation> </message> <message> <location line="-349"/> <source>&amp;Overview</source> <translation>&amp;Panorama general</translation> </message> <message> <location line="+1"/> <source>Show general overview of wallet</source> <translation>Mostra panorama general del moneder</translation> </message> <message> <location line="+20"/> <source>&amp;Transactions</source> <translation>&amp;Transaccions</translation> </message> <message> <location line="+1"/> <source>Browse transaction history</source> <translation>Cerca a l&apos;historial de transaccions</translation> </message> <message> <location line="+7"/> <source>Edit the list of stored addresses and labels for sending</source> <translation>Edita la llista d&apos;adreces emmagatzemada i etiquetes</translation> </message> <message> <location line="-14"/> <source>Show the list of addresses for receiving payments</source> <translation>Mostra el llistat d&apos;adreces per rebre pagaments</translation> </message> <message> <location line="+31"/> <source>E&amp;xit</source> <translation>S&amp;ortir</translation> </message> <message> <location line="+1"/> <source>Quit application</source> <translation>Sortir de l&apos;aplicació</translation> </message> <message> <location line="+4"/> <source>Show information about Dogecoin</source> <translation>Mostra informació sobre Dogecoin</translation> </message> <message> <location line="+2"/> <source>About &amp;Qt</source> <translation>Sobre &amp;Qt</translation> </message> <message> <location line="+1"/> <source>Show information about Qt</source> <translation>Mostra informació sobre Qt</translation> </message> <message> <location line="+2"/> <source>&amp;Options...</source> <translation>&amp;Opcions...</translation> </message> <message> <location line="+6"/> <source>&amp;Encrypt Wallet...</source> <translation>&amp;Xifrar moneder</translation> </message> <message> <location line="+3"/> <source>&amp;Backup Wallet...</source> <translation>&amp;Realitzant copia de seguretat del moneder...</translation> </message> <message> <location line="+2"/> <source>&amp;Change Passphrase...</source> <translation>&amp;Canviar contrasenya...</translation> </message> <message> <location line="+285"/> 
<source>Importing blocks from disk...</source> <translation>Important blocs del disc..</translation> </message> <message> <location line="+3"/> <source>Reindexing blocks on disk...</source> <translation>Re-indexant blocs al disc...</translation> </message> <message> <location line="-347"/> <source>Send coins to a Dogecoin address</source> <translation>Enviar monedes a una adreça Dogecoin</translation> </message> <message> <location line="+49"/> <source>Modify configuration options for Dogecoin</source> <translation>Modificar les opcions de configuració per dogecoin</translation> </message> <message> <location line="+9"/> <source>Backup wallet to another location</source> <translation>Realitzar còpia de seguretat del moneder a un altre directori</translation> </message> <message> <location line="+2"/> <source>Change the passphrase used for wallet encryption</source> <translation>Canviar la constrasenya d&apos;encriptació del moneder</translation> </message> <message> <location line="+6"/> <source>&amp;Debug window</source> <translation>&amp;Finestra de debug</translation> </message> <message> <location line="+1"/> <source>Open debugging and diagnostic console</source> <translation>Obrir la consola de diagnòstic i debugging</translation> </message> <message> <location line="-4"/> <source>&amp;Verify message...</source> <translation>&amp;Verifica el missatge..</translation> </message> <message> <location line="-165"/> <location line="+530"/> <source>Dogecoin</source> <translation>Dogecoin</translation> </message> <message> <location line="-530"/> <source>Wallet</source> <translation>Moneder</translation> </message> <message> <location line="+101"/> <source>&amp;Send</source> <translation>&amp;Enviar</translation> </message> <message> <location line="+7"/> <source>&amp;Receive</source> <translation>&amp;Rebre</translation> </message> <message> <location line="+14"/> <source>&amp;Addresses</source> <translation>&amp;Adreces</translation> </message> <message> <location line="+22"/> <source>&amp;About Dogecoin</source> <translation>&amp;Sobre Dogecoin</translation> </message> <message> <location line="+9"/> <source>&amp;Show / Hide</source> <translation>&amp;Mostrar / Amagar</translation> </message> <message> <location line="+1"/> <source>Show or hide the main Window</source> <translation>Mostrar o amagar la finestra principal</translation> </message> <message> <location line="+3"/> <source>Encrypt the private keys that belong to your wallet</source> <translation>Xifrar les claus privades pertanyents al seu moneder</translation> </message> <message> <location line="+7"/> <source>Sign messages with your Dogecoin addresses to prove you own them</source> <translation>Signa el missatges amb la seva adreça de Dogecoin per provar que les poseeixes</translation> </message> <message> <location line="+2"/> <source>Verify messages to ensure they were signed with specified Dogecoin addresses</source> <translation>Verificar els missatges per assegurar-te que han estat signades amb una adreça Dogecoin específica.</translation> </message> <message> <location line="+28"/> <source>&amp;File</source> <translation>&amp;Arxiu</translation> </message> <message> <location line="+7"/> <source>&amp;Settings</source> <translation>&amp;Configuració</translation> </message> <message> <location line="+6"/> <source>&amp;Help</source> <translation>&amp;Ajuda</translation> </message> <message> <location line="+9"/> <source>Tabs toolbar</source> <translation>Barra d&apos;eines de seccions</translation> </message> <message> 
<location line="+17"/> <location line="+10"/> <source>[testnet]</source> <translation>[testnet]</translation> </message> <message> <location line="+47"/> <source>Dogecoin client</source> <translation>Client Dogecoin</translation> </message> <message numerus="yes"> <location line="+141"/> <source>%n active connection(s) to Dogecoin network</source> <translation><numerusform>%n connexió activa a la xarxa Dogecoin</numerusform><numerusform>%n connexions actives a la xarxa Dogecoin</numerusform></translation> </message> <message> <location line="+22"/> <source>No block source available...</source> <translation type="unfinished"/> </message> <message> <location line="+12"/> <source>Processed %1 of %2 (estimated) blocks of transaction history.</source> <translation>Processat el %1 de %2 (estimat) dels blocs del històric de transaccions.</translation> </message> <message> <location line="+4"/> <source>Processed %1 blocks of transaction history.</source> <translation>Proccessats %1 blocs del històric de transaccions.</translation> </message> <message numerus="yes"> <location line="+20"/> <source>%n hour(s)</source> <translation><numerusform>%n hora</numerusform><numerusform>%n hores</numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n day(s)</source> <translation><numerusform>%n dia</numerusform><numerusform>%n dies</numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n week(s)</source> <translation><numerusform>%n setmana</numerusform><numerusform>%n setmanes</numerusform></translation> </message> <message> <location line="+4"/> <source>%1 behind</source> <translation>%1 radera</translation> </message> <message> <location line="+14"/> <source>Last received block was generated %1 ago.</source> <translation>Lúltim bloc rebut ha estat generat fa %1.</translation> </message> <message> <location line="+2"/> <source>Transactions after this will not yet be visible.</source> <translation>Les transaccions a partir d&apos;això no seràn visibles.</translation> </message> <message> <location line="+22"/> <source>Error</source> <translation>Error</translation> </message> <message> <location line="+3"/> <source>Warning</source> <translation>Avís</translation> </message> <message> <location line="+3"/> <source>Information</source> <translation>Informació</translation> </message> <message> <location line="+70"/> <source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source> <translation>Aquesta transacció supera el límit de tamany. Tot i així pots enviar-la amb una comissió de %1, que es destinen als nodes que processen la seva transacció i ajuda a donar suport a la xarxa. 
Vols pagar la comissió?</translation> </message> <message> <location line="-140"/> <source>Up to date</source> <translation>Al dia</translation> </message> <message> <location line="+31"/> <source>Catching up...</source> <translation>Posar-se al dia ...</translation> </message> <message> <location line="+113"/> <source>Confirm transaction fee</source> <translation>Confirmar comisió de transacció</translation> </message> <message> <location line="+8"/> <source>Sent transaction</source> <translation>Transacció enviada</translation> </message> <message> <location line="+0"/> <source>Incoming transaction</source> <translation>Transacció entrant</translation> </message> <message> <location line="+1"/> <source>Date: %1 Amount: %2 Type: %3 Address: %4 </source> <translation>Data: %1\nQuantitat %2\n Tipus: %3\n Adreça: %4\n</translation> </message> <message> <location line="+33"/> <location line="+23"/> <source>URI handling</source> <translation>Manejant URI</translation> </message> <message> <location line="-23"/> <location line="+23"/> <source>URI can not be parsed! This can be caused by an invalid Dogecoin address or malformed URI parameters.</source> <translation>la URI no pot ser processada! Això es pot ser causat per una adreça Dogecoin invalida o paràmetres URI malformats.</translation> </message> <message> <location line="+17"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation>El moneder està &lt;b&gt;encriptat&lt;/b&gt; i actualment &lt;b&gt;desbloquejat&lt;/b&gt;</translation> </message> <message> <location line="+8"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation>El moneder està &lt;b&gt;encriptat&lt;/b&gt; i actualment &lt;b&gt;bloquejat&lt;/b&gt;</translation> </message> <message> <location filename="../bitcoin.cpp" line="+111"/> <source>A fatal error occurred. Dogecoin can no longer continue safely and will quit.</source> <translation>Ha tingut lloc un error fatal. Dogecoin no pot continuar executant-se de manera segura i es tancará.</translation> </message> </context> <context> <name>ClientModel</name> <message> <location filename="../clientmodel.cpp" line="+104"/> <source>Network Alert</source> <translation>Alerta de xarxa</translation> </message> </context> <context> <name>EditAddressDialog</name> <message> <location filename="../forms/editaddressdialog.ui" line="+14"/> <source>Edit Address</source> <translation>Editar Adreça</translation> </message> <message> <location line="+11"/> <source>&amp;Label</source> <translation>&amp;Etiqueta</translation> </message> <message> <location line="+10"/> <source>The label associated with this address book entry</source> <translation>Etiqueta associada amb aquesta entrada de la llibreta d&apos;adreces</translation> </message> <message> <location line="+7"/> <source>&amp;Address</source> <translation>&amp;Direcció</translation> </message> <message> <location line="+10"/> <source>The address associated with this address book entry. This can only be modified for sending addresses.</source> <translation>Adreça associada amb aquesta entrada de la llibreta d&apos;adreces. 
Només pot ser modificat per a enviar adreces.</translation> </message> <message> <location filename="../editaddressdialog.cpp" line="+21"/> <source>New receiving address</source> <translation>Nova adreça de recepció.</translation> </message> <message> <location line="+4"/> <source>New sending address</source> <translation>Nova adreça d&apos;enviament</translation> </message> <message> <location line="+3"/> <source>Edit receiving address</source> <translation>Editar adreces de recepció</translation> </message> <message> <location line="+4"/> <source>Edit sending address</source> <translation>Editar adreces d&apos;enviament</translation> </message> <message> <location line="+76"/> <source>The entered address &quot;%1&quot; is already in the address book.</source> <translation>L&apos;adreça introduïda &quot;%1&quot; ja és present a la llibreta d&apos;adreces.</translation> </message> <message> <location line="-5"/> <source>The entered address &quot;%1&quot; is not a valid Dogecoin address.</source> <translation>L&apos;adreça introduida &quot;%1&quot; no és una adreça Dogecoin valida.</translation> </message> <message> <location line="+10"/> <source>Could not unlock wallet.</source> <translation>No s&apos;ha pogut desbloquejar el moneder.</translation> </message> <message> <location line="+5"/> <source>New key generation failed.</source> <translation>Ha fallat la generació d&apos;una nova clau.</translation> </message> </context> <context> <name>GUIUtil::HelpMessageBox</name> <message> <location filename="../guiutil.cpp" line="+424"/> <location line="+12"/> <source>Dogecoin-Qt</source> <translation>Dogecoin-Qt</translation> </message> <message> <location line="-12"/> <source>version</source> <translation>versió</translation> </message> <message> <location line="+2"/> <source>Usage:</source> <translation>Ús:</translation> </message> <message> <location line="+1"/> <source>command-line options</source> <translation>Opcions de la línia d&apos;ordres</translation> </message> <message> <location line="+4"/> <source>UI options</source> <translation>Opcions de IU</translation> </message> <message> <location line="+1"/> <source>Set language, for example &quot;de_DE&quot; (default: system locale)</source> <translation>Definir llenguatge, per exemple &quot;de_DE&quot; (per defecte: Preferències locals de sistema)</translation> </message> <message> <location line="+1"/> <source>Start minimized</source> <translation>Iniciar minimitzat</translation> </message> <message> <location line="+1"/> <source>Show splash screen on startup (default: 1)</source> <translation>Mostrar finestra de benvinguda a l&apos;inici (per defecte: 1)</translation> </message> </context> <context> <name>OptionsDialog</name> <message> <location filename="../forms/optionsdialog.ui" line="+14"/> <source>Options</source> <translation>Opcions</translation> </message> <message> <location line="+16"/> <source>&amp;Main</source> <translation>&amp;Principal</translation> </message> <message> <location line="+6"/> <source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. 
Most transactions are 1 kB.</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Pay transaction &amp;fee</source> <translation>Pagar &amp;comisió de transacció</translation> </message> <message> <location line="+31"/> <source>Automatically start Dogecoin after logging in to the system.</source> <translation>Iniciar automàticament Dogecoin després de l&apos;inici de sessió del sistema.</translation> </message> <message> <location line="+3"/> <source>&amp;Start Dogecoin on system login</source> <translation>&amp;Iniciar Dogecoin al inici de sessió del sistema.</translation> </message> <message> <location line="+35"/> <source>Reset all client options to default.</source> <translation>Reestablir totes les opcions del client.</translation> </message> <message> <location line="+3"/> <source>&amp;Reset Options</source> <translation>&amp;Reestablir Opcions</translation> </message> <message> <location line="+13"/> <source>&amp;Network</source> <translation>&amp;Xarxa</translation> </message> <message> <location line="+6"/> <source>Automatically open the Dogecoin client port on the router. This only works when your router supports UPnP and it is enabled.</source> <translation>Obrir el port del client de Dogecoin al router de forma automàtica. Això només funciona quan el teu router implementa UPnP i l&apos;opció està activada.</translation> </message> <message> <location line="+3"/> <source>Map port using &amp;UPnP</source> <translation>Port obert amb &amp;UPnP</translation> </message> <message> <location line="+7"/> <source>Connect to the Dogecoin network through a SOCKS proxy (e.g. when connecting through Tor).</source> <translation>Connectar a la xarxa Dogecoin a través de un SOCKS proxy (per exemple connectant a través de Tor).</translation> </message> <message> <location line="+3"/> <source>&amp;Connect through SOCKS proxy:</source> <translation>&amp;Connecta a través de un proxy SOCKS:</translation> </message> <message> <location line="+9"/> <source>Proxy &amp;IP:</source> <translation>&amp;IP del proxy:</translation> </message> <message> <location line="+19"/> <source>IP address of the proxy (e.g. 127.0.0.1)</source> <translation>Adreça IP del proxy (per exemple 127.0.0.1)</translation> </message> <message> <location line="+7"/> <source>&amp;Port:</source> <translation>&amp;Port:</translation> </message> <message> <location line="+19"/> <source>Port of the proxy (e.g. 9050)</source> <translation>Port del proxy (per exemple 9050)</translation> </message> <message> <location line="+7"/> <source>SOCKS &amp;Version:</source> <translation>&amp;Versió de SOCKS:</translation> </message> <message> <location line="+13"/> <source>SOCKS version of the proxy (e.g. 5)</source> <translation>Versió SOCKS del proxy (per exemple 5)</translation> </message> <message> <location line="+36"/> <source>&amp;Window</source> <translation>&amp;Finestra</translation> </message> <message> <location line="+6"/> <source>Show only a tray icon after minimizing the window.</source> <translation>Mostrar només l&apos;icona de la barra al minimitzar l&apos;aplicació.</translation> </message> <message> <location line="+3"/> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation>&amp;Minimitzar a la barra d&apos;aplicacions</translation> </message> <message> <location line="+7"/> <source>Minimize instead of exit the application when the window is closed. 
When this option is enabled, the application will be closed only after selecting Quit in the menu.</source> <translation>Minimitza en comptes de sortir de la aplicació al tancar la finestra. Quan aquesta opció està activa, la aplicació només es tancarà al seleccionar Sortir al menú.</translation> </message> <message> <location line="+3"/> <source>M&amp;inimize on close</source> <translation>M&amp;inimitzar al tancar</translation> </message> <message> <location line="+21"/> <source>&amp;Display</source> <translation>&amp;Pantalla</translation> </message> <message> <location line="+8"/> <source>User Interface &amp;language:</source> <translation>Llenguatge de la Interfície d&apos;Usuari:</translation> </message> <message> <location line="+13"/> <source>The user interface language can be set here. This setting will take effect after restarting Dogecoin.</source> <translation>Aquí pots definir el llenguatge de l&apos;aplicatiu. Aquesta configuració tindrà efecte un cop es reiniciï Dogecoin.</translation> </message> <message> <location line="+11"/> <source>&amp;Unit to show amounts in:</source> <translation>&amp;Unitats per mostrar les quantitats en:</translation> </message> <message> <location line="+13"/> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation>Sel·lecciona la unitat de subdivisió per defecte per mostrar en la interficie quan s&apos;envien monedes.</translation> </message> <message> <location line="+9"/> <source>Whether to show Dogecoin addresses in the transaction list or not.</source> <translation>Mostrar adreces Dogecoin als llistats de transaccions o no.</translation> </message> <message> <location line="+3"/> <source>&amp;Display addresses in transaction list</source> <translation>&amp;Mostrar adreces al llistat de transaccions</translation> </message> <message> <location line="+71"/> <source>&amp;OK</source> <translation>&amp;OK</translation> </message> <message> <location line="+7"/> <source>&amp;Cancel</source> <translation>&amp;Cancel·la</translation> </message> <message> <location line="+10"/> <source>&amp;Apply</source> <translation>&amp;Aplicar</translation> </message> <message> <location filename="../optionsdialog.cpp" line="+53"/> <source>default</source> <translation>Per defecte</translation> </message> <message> <location line="+130"/> <source>Confirm options reset</source> <translation>Confirmi el reestabliment de les opcions</translation> </message> <message> <location line="+1"/> <source>Some settings may require a client restart to take effect.</source> <translation>Algunes configuracions poden requerir reiniciar el client per a que tinguin efecte.</translation> </message> <message> <location line="+0"/> <source>Do you want to proceed?</source> <translation>Vols procedir?</translation> </message> <message> <location line="+42"/> <location line="+9"/> <source>Warning</source> <translation>Avís</translation> </message> <message> <location line="-9"/> <location line="+9"/> <source>This setting will take effect after restarting Dogecoin.</source> <translation>Aquesta configuració tindrà efecte un cop es reiniciï Dogecoin.</translation> </message> <message> <location line="+29"/> <source>The supplied proxy address is invalid.</source> <translation>L&apos;adreça proxy introduïda és invalida.</translation> </message> </context> <context> <name>OverviewPage</name> <message> <location filename="../forms/overviewpage.ui" line="+14"/> <source>Form</source> <translation>Formulari</translation> </message> 
<message> <location line="+50"/> <location line="+166"/> <source>The displayed information may be out of date. Your wallet automatically synchronizes with the Dogecoin network after a connection is established, but this process has not completed yet.</source> <translation>La informació mostrada pot no estar al día. El teu moneder es sincronitza automàticament amb la xarxa Dogecoin un cop s&apos;ha establert connexió, però aquest proces no s&apos;ha completat encara.</translation> </message> <message> <location line="-124"/> <source>Balance:</source> <translation>Balanç:</translation> </message> <message> <location line="+29"/> <source>Unconfirmed:</source> <translation>Sense confirmar:</translation> </message> <message> <location line="-78"/> <source>Wallet</source> <translation>Moneder</translation> </message> <message> <location line="+107"/> <source>Immature:</source> <translation>Immatur:</translation> </message> <message> <location line="+13"/> <source>Mined balance that has not yet matured</source> <translation>Balanç minat que encara no ha madurat</translation> </message> <message> <location line="+46"/> <source>&lt;b&gt;Recent transactions&lt;/b&gt;</source> <translation>&lt;b&gt;Transaccions recents&lt;/b&gt;</translation> </message> <message> <location line="-101"/> <source>Your current balance</source> <translation>El seu balanç actual</translation> </message> <message> <location line="+29"/> <source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source> <translation>Total de transaccions encara sense confirmar, que encara no es content en el balanç actual</translation> </message> <message> <location filename="../overviewpage.cpp" line="+116"/> <location line="+1"/> <source>out of sync</source> <translation>Fora de sincronia</translation> </message> </context> <context> <name>PaymentServer</name> <message> <location filename="../paymentserver.cpp" line="+107"/> <source>Cannot start dogecoin: click-to-pay handler</source> <translation>No es pot iniciar dogecoin: manejador clicla-per-pagar</translation> </message> </context> <context> <name>QRCodeDialog</name> <message> <location filename="../forms/qrcodedialog.ui" line="+14"/> <source>QR Code Dialog</source> <translation>Dialeg del codi QR</translation> </message> <message> <location line="+59"/> <source>Request Payment</source> <translation>Reclamar pagament</translation> </message> <message> <location line="+56"/> <source>Amount:</source> <translation>Quantitat:</translation> </message> <message> <location line="-44"/> <source>Label:</source> <translation>Etiqueta:</translation> </message> <message> <location line="+19"/> <source>Message:</source> <translation>Missatge:</translation> </message> <message> <location line="+71"/> <source>&amp;Save As...</source> <translation>&amp;Desar com...</translation> </message> <message> <location filename="../qrcodedialog.cpp" line="+62"/> <source>Error encoding URI into QR Code.</source> <translation>Error codificant la URI en un codi QR.</translation> </message> <message> <location line="+40"/> <source>The entered amount is invalid, please check.</source> <translation>La quantitat introduïda és invalida, si us plau comprovi-la.</translation> </message> <message> <location line="+23"/> <source>Resulting URI too long, try to reduce the text for label / message.</source> <translation>URI resultant massa llarga, intenta reduir el text per a la etiqueta / missatge</translation> </message> <message> <location line="+25"/> <source>Save QR 
Code</source> <translation>Desar codi QR</translation> </message> <message> <location line="+0"/> <source>PNG Images (*.png)</source> <translation>Imatges PNG (*.png)</translation> </message> </context> <context> <name>RPCConsole</name> <message> <location filename="../forms/rpcconsole.ui" line="+46"/> <source>Client name</source> <translation>Nom del client</translation> </message> <message> <location line="+10"/> <location line="+23"/> <location line="+26"/> <location line="+23"/> <location line="+23"/> <location line="+36"/> <location line="+53"/> <location line="+23"/> <location line="+23"/> <location filename="../rpcconsole.cpp" line="+339"/> <source>N/A</source> <translation>N/A</translation> </message> <message> <location line="-217"/> <source>Client version</source> <translation>Versió del client</translation> </message> <message> <location line="-45"/> <source>&amp;Information</source> <translation>&amp;Informació</translation> </message> <message> <location line="+68"/> <source>Using OpenSSL version</source> <translation>Utilitzant OpenSSL versió</translation> </message> <message> <location line="+49"/> <source>Startup time</source> <translation>Temps d&apos;inici</translation> </message> <message> <location line="+29"/> <source>Network</source> <translation>Xarxa</translation> </message> <message> <location line="+7"/> <source>Number of connections</source> <translation>Nombre de connexions</translation> </message> <message> <location line="+23"/> <source>On testnet</source> <translation>A testnet</translation> </message> <message> <location line="+23"/> <source>Block chain</source> <translation>Cadena de blocs</translation> </message> <message> <location line="+7"/> <source>Current number of blocks</source> <translation>Nombre actual de blocs</translation> </message> <message> <location line="+23"/> <source>Estimated total blocks</source> <translation>Total estimat de blocs</translation> </message> <message> <location line="+23"/> <source>Last block time</source> <translation>Últim temps de bloc</translation> </message> <message> <location line="+52"/> <source>&amp;Open</source> <translation>&amp;Obrir</translation> </message> <message> <location line="+16"/> <source>Command-line options</source> <translation>Opcions de línia d&apos;ordres</translation> </message> <message> <location line="+7"/> <source>Show the Dogecoin-Qt help message to get a list with possible Dogecoin command-line options.</source> <translation>Mostrar el missatge d&apos;ajuda de Dogecoin-Qt per a obtenir un llistat de possibles ordres per a la línia d&apos;ordres de Dogecoin.</translation> </message> <message> <location line="+3"/> <source>&amp;Show</source> <translation>&amp;Mostrar</translation> </message> <message> <location line="+24"/> <source>&amp;Console</source> <translation>&amp;Consola</translation> </message> <message> <location line="-260"/> <source>Build date</source> <translation>Data de compilació</translation> </message> <message> <location line="-104"/> <source>Dogecoin - Debug window</source> <translation>Dogecoin - Finestra de debug</translation> </message> <message> <location line="+25"/> <source>Dogecoin Core</source> <translation>Nucli de Dogecoin</translation> </message> <message> <location line="+279"/> <source>Debug log file</source> <translation>Dietari de debug</translation> </message> <message> <location line="+7"/> <source>Open the Dogecoin debug log file from the current data directory. 
This can take a few seconds for large log files.</source> <translation>Obrir el dietari de debug de Dogecoin del directori de dades actual. Això pot trigar uns quants segons per a dietaris grossos.</translation> </message> <message> <location line="+102"/> <source>Clear console</source> <translation>Netejar consola</translation> </message> <message> <location filename="../rpcconsole.cpp" line="-30"/> <source>Welcome to the Dogecoin RPC console.</source> <translation>Benvingut a la consola RPC de Dogecoin.</translation> </message> <message> <location line="+1"/> <source>Use up and down arrows to navigate history, and &lt;b&gt;Ctrl-L&lt;/b&gt; to clear screen.</source> <translation>Utilitza les fletxes d&apos;amunt i avall per navegar per l&apos;històric, i &lt;b&gt;Ctrl-L&lt;/b&gt; per netejar la pantalla.</translation> </message> <message> <location line="+1"/> <source>Type &lt;b&gt;help&lt;/b&gt; for an overview of available commands.</source> <translation>Escriu &lt;b&gt;help&lt;/b&gt; per a obtenir un llistat de les ordres disponibles.</translation> </message> </context> <context> <name>SendCoinsDialog</name> <message> <location filename="../forms/sendcoinsdialog.ui" line="+14"/> <location filename="../sendcoinsdialog.cpp" line="+124"/> <location line="+5"/> <location line="+5"/> <location line="+5"/> <location line="+6"/> <location line="+5"/> <location line="+5"/> <source>Send Coins</source> <translation>Enviar monedes</translation> </message> <message> <location line="+50"/> <source>Send to multiple recipients at once</source> <translation>Enviar a múltiples destinataris al mateix temps</translation> </message> <message> <location line="+3"/> <source>Add &amp;Recipient</source> <translation>Afegir &amp;Destinatari</translation> </message> <message> <location line="+20"/> <source>Remove all transaction fields</source> <translation>Netejar tots els camps de la transacció</translation> </message> <message> <location line="+3"/> <source>Clear &amp;All</source> <translation>Esborrar &amp;Tot</translation> </message> <message> <location line="+22"/> <source>Balance:</source> <translation>Balanç:</translation> </message> <message> <location line="+10"/> <source>123.456 BTC</source> <translation>123.456 BTC</translation> </message> <message> <location line="+31"/> <source>Confirm the send action</source> <translation>Confirmi l&apos;acció d&apos;enviament</translation> </message> <message> <location line="+3"/> <source>S&amp;end</source> <translation>E&amp;nviar</translation> </message> <message> <location filename="../sendcoinsdialog.cpp" line="-59"/> <source>&lt;b&gt;%1&lt;/b&gt; to %2 (%3)</source> <translation>&lt;b&gt;%1&lt;/b&gt; a %2 (%3)</translation> </message> <message> <location line="+5"/> <source>Confirm send coins</source> <translation>Confirmar l&apos;enviament de monedes</translation> </message> <message> <location line="+1"/> <source>Are you sure you want to send %1?</source> <translation>Estàs segur que vols enviar %1?</translation> </message> <message> <location line="+0"/> <source> and </source> <translation> i </translation> </message> <message> <location line="+23"/> <source>The recipient address is not valid, please recheck.</source> <translation>L&apos;adreça del destinatari no és vàlida, si us plau comprovi-la.</translation> </message> <message> <location line="+5"/> <source>The amount to pay must be larger than 0.</source> <translation>La quantitat a pagar ha de ser major que 0.</translation> </message> <message> <location line="+5"/> <source>The amount exceeds your 
balance.</source> <translation>L&apos;import supera el saldo del seu compte.</translation> </message> <message> <location line="+5"/> <source>The total exceeds your balance when the %1 transaction fee is included.</source> <translation>El total excedeix el teu balanç quan s&apos;hi inclou la comissió de transacció de %1.</translation> </message> <message> <location line="+6"/> <source>Duplicate address found, can only send to each address once per send operation.</source> <translation>S&apos;ha trobat una adreça duplicada, tan sols es pot enviar a cada adreça un cop per ordre d&apos;enviament.</translation> </message> <message> <location line="+5"/> <source>Error: Transaction creation failed!</source> <translation>Error: La creació de la transacció ha fallat!</translation> </message> <message> <location line="+5"/> <source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation>Error: La transacció ha estat rebutjada. Això pot passar si alguna de les monedes del teu moneder ja s&apos;han gastat, com si haguessis usat una còpia de l&apos;arxiu wallet.dat i s&apos;haguessin gastat monedes de la còpia però sense marcar-les com a gastades en aquest.</translation> </message> </context> <context> <name>SendCoinsEntry</name> <message> <location filename="../forms/sendcoinsentry.ui" line="+14"/> <source>Form</source> <translation>Formulari</translation> </message> <message> <location line="+15"/> <source>A&amp;mount:</source> <translation>Q&amp;uantitat:</translation> </message> <message> <location line="+13"/> <source>Pay &amp;To:</source> <translation>Pagar &amp;A:</translation> </message> <message> <location line="+34"/> <source>The address to send the payment to (e.g. DJ7zB7c5BsB9UJLy1rKQtY7c6CQfGiaRLM)</source> <translation>L&apos;adreça on enviar el pagament (per exemple: DJ7zB7c5BsB9UJLy1rKQtY7c6CQfGiaRLM)</translation> </message> <message> <location line="+60"/> <location filename="../sendcoinsentry.cpp" line="+26"/> <source>Enter a label for this address to add it to your address book</source> <translation>Introdueixi una etiqueta per a aquesta adreça per afegir-la a la llibreta d&apos;adreces</translation> </message> <message> <location line="-78"/> <source>&amp;Label:</source> <translation>&amp;Etiqueta:</translation> </message> <message> <location line="+28"/> <source>Choose address from address book</source> <translation>Escollir adreça de la llibreta d&apos;adreces</translation> </message> <message> <location line="+10"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="+7"/> <source>Paste address from clipboard</source> <translation>Enganxar adreça del porta-retalls</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+7"/> <source>Remove this recipient</source> <translation>Eliminar aquest destinatari</translation> </message> <message> <location filename="../sendcoinsentry.cpp" line="+1"/> <source>Enter a Dogecoin address (e.g. 
DJ7zB7c5BsB9UJLy1rKQtY7c6CQfGiaRLM)</source> <translation>Introdueixi una adreça de Dogecoin (per exemple DJ7zB7c5BsB9UJLy1rKQtY7c6CQfGiaRLM)</translation> </message> </context> <context> <name>SignVerifyMessageDialog</name> <message> <location filename="../forms/signverifymessagedialog.ui" line="+14"/> <source>Signatures - Sign / Verify a Message</source> <translation>Signatures .Signar/Verificar un Missatge</translation> </message> <message> <location line="+13"/> <source>&amp;Sign Message</source> <translation>&amp;Signar Missatge</translation> </message> <message> <location line="+6"/> <source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source> <translation>Pots signar missatges amb la teva adreça per provar que són teus. Sigues cautelòs al signar qualsevol cosa, ja que els atacs phising poden intentar confondre&apos;t per a que els hi signis amb la teva identitat. Tan sols signa als documents completament detallats amb els que hi estàs d&apos;acord.</translation> </message> <message> <location line="+18"/> <source>The address to sign the message with (e.g. DJ7zB7c5BsB9UJLy1rKQtY7c6CQfGiaRLM)</source> <translation>La adreça amb la que signat els missatges (per exemple DJ7zB7c5BsB9UJLy1rKQtY7c6CQfGiaRLM)</translation> </message> <message> <location line="+10"/> <location line="+213"/> <source>Choose an address from the address book</source> <translation>Escollir una adreça de la llibreta de direccions</translation> </message> <message> <location line="-203"/> <location line="+213"/> <source>Alt+A</source> <translation>Alta+A</translation> </message> <message> <location line="-203"/> <source>Paste address from clipboard</source> <translation>Enganxar adreça del porta-retalls</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+12"/> <source>Enter the message you want to sign here</source> <translation>Introdueix aqui el missatge que vols signar</translation> </message> <message> <location line="+7"/> <source>Signature</source> <translation>Signatura</translation> </message> <message> <location line="+27"/> <source>Copy the current signature to the system clipboard</source> <translation>Copiar la signatura actual al porta-retalls del sistema</translation> </message> <message> <location line="+21"/> <source>Sign the message to prove you own this Dogecoin address</source> <translation>Signa el missatge per provar que ets propietari d&apos;aquesta adreça Dogecoin</translation> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation>Signar &amp;Missatge</translation> </message> <message> <location line="+14"/> <source>Reset all sign message fields</source> <translation>Neteja tots els camps de clau</translation> </message> <message> <location line="+3"/> <location line="+146"/> <source>Clear &amp;All</source> <translation>Esborrar &amp;Tot</translation> </message> <message> <location line="-87"/> <source>&amp;Verify Message</source> <translation>&amp;Verificar el missatge</translation> </message> <message> <location line="+6"/> <source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. 
Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source> <translation>Introdueixi l&apos;adreça signant, el missatge (assegura&apos;t que copies salts de línia, espais, tabuladors, etc. exactament tot el text) i la signatura a sota per verificar el missatge. Per evitar ser enganyat per un atac home-entre-mig, vés amb compte de no llegir més en la signatura del que hi ha al missatge signat mateix.</translation> </message> <message> <location line="+21"/> <source>The address the message was signed with (e.g. DJ7zB7c5BsB9UJLy1rKQtY7c6CQfGiaRLM)</source> <translation>L&apos;adreça amb què el missatge va ser signat (per exemple DJ7zB7c5BsB9UJLy1rKQtY7c6CQfGiaRLM)</translation> </message> <message> <location line="+40"/> <source>Verify the message to ensure it was signed with the specified Dogecoin address</source> <translation>Verificar el missatge per assegurar-se que ha estat signat amb l&apos;adreça Dogecoin especificada</translation> </message> <message> <location line="+3"/> <source>Verify &amp;Message</source> <translation>Verificar &amp;Missatge</translation> </message> <message> <location line="+14"/> <source>Reset all verify message fields</source> <translation>Neteja tots els camps de verificació de missatge</translation> </message> <message> <location filename="../signverifymessagedialog.cpp" line="+27"/> <location line="+3"/> <source>Enter a Dogecoin address (e.g. DJ7zB7c5BsB9UJLy1rKQtY7c6CQfGiaRLM)</source> <translation>Introdueixi una adreça de Dogecoin (per exemple DJ7zB7c5BsB9UJLy1rKQtY7c6CQfGiaRLM)</translation> </message> <message> <location line="-2"/> <source>Click &quot;Sign Message&quot; to generate signature</source> <translation>Clica &quot;Signar Missatge&quot; per a generar una signatura</translation> </message> <message> <location line="+3"/> <source>Enter Dogecoin signature</source> <translation>Introdueixi una signatura de Dogecoin</translation> </message> <message> <location line="+82"/> <location line="+81"/> <source>The entered address is invalid.</source> <translation>L&apos;adreça introduïda és invàlida.</translation> </message> <message> <location line="-81"/> <location line="+8"/> <location line="+73"/> <location line="+8"/> <source>Please check the address and try again.</source> <translation>Si us plau, comprovi l&apos;adreça i provi de nou.</translation> </message> <message> <location line="-81"/> <location line="+81"/> <source>The entered address does not refer to a key.</source> <translation>L&apos;adreça introduïda no fa referència a cap clau.</translation> </message> <message> <location line="-73"/> <source>Wallet unlock was cancelled.</source> <translation>El desbloqueig del moneder ha estat cancel·lat.</translation> </message> <message> <location line="+8"/> <source>Private key for the entered address is not available.</source> <translation>La clau privada per a l&apos;adreça introduïda no està disponible.</translation> </message> <message> <location line="+12"/> <source>Message signing failed.</source> <translation>El signat del missatge ha fallat.</translation> </message> <message> <location line="+5"/> <source>Message signed.</source> <translation>Missatge signat.</translation> </message> <message> <location line="+59"/> <source>The signature could not be decoded.</source> <translation>La signatura no s&apos;ha pogut decodificar.</translation> </message> <message> <location line="+0"/> <location line="+13"/> <source>Please check the signature and try again.</source> 
<translation>Si us plau, comprovi la signatura i provi de nou.</translation> </message> <message> <location line="+0"/> <source>The signature did not match the message digest.</source> <translation>La signatura no coincideix amb el resum del missatge.</translation> </message> <message> <location line="+7"/> <source>Message verification failed.</source> <translation>Ha fallat la verificació del missatge.</translation> </message> <message> <location line="+5"/> <source>Message verified.</source> <translation>Missatge verificat.</translation> </message> </context> <context> <name>SplashScreen</name> <message> <location filename="../splashscreen.cpp" line="+22"/> <source>The Dogecoin developers</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>[testnet]</source> <translation type="unfinished"/> </message> </context> <context> <name>TransactionDesc</name> <message> <location filename="../transactiondesc.cpp" line="+20"/> <source>Open until %1</source> <translation>Obert fins %1</translation> </message> <message> <location line="+6"/> <source>%1/offline</source> <translation>%1/offline</translation> </message> <message> <location line="+2"/> <source>%1/unconfirmed</source> <translation>%1/sense confirmar</translation> </message> <message> <location line="+2"/> <source>%1 confirmations</source> <translation>%1 confirmacions</translation> </message> <message> <location line="+18"/> <source>Status</source> <translation>Estat</translation> </message> <message numerus="yes"> <location line="+7"/> <source>, broadcast through %n node(s)</source> <translation><numerusform>, difusió a través de %n node</numerusform><numerusform>, difusió a través de %n nodes</numerusform></translation> </message> <message> <location line="+4"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+7"/> <source>Source</source> <translation>Font</translation> </message> <message> <location line="+0"/> <source>Generated</source> <translation>Generat</translation> </message> <message> <location line="+5"/> <location line="+17"/> <source>From</source> <translation>Des de</translation> </message> <message> <location line="+1"/> <location line="+22"/> <location line="+58"/> <source>To</source> <translation>A</translation> </message> <message> <location line="-77"/> <location line="+2"/> <source>own address</source> <translation>adreça pròpia</translation> </message> <message> <location line="-2"/> <source>label</source> <translation>etiqueta</translation> </message> <message> <location line="+37"/> <location line="+12"/> <location line="+45"/> <location line="+17"/> <location line="+30"/> <source>Credit</source> <translation>Crèdit</translation> </message> <message numerus="yes"> <location line="-102"/> <source>matures in %n more block(s)</source> <translation><numerusform>disponible en %n bloc més</numerusform><numerusform>disponibles en %n blocs més</numerusform></translation> </message> <message> <location line="+2"/> <source>not accepted</source> <translation>no acceptat</translation> </message> <message> <location line="+44"/> <location line="+8"/> <location line="+15"/> <location line="+30"/> <source>Debit</source> <translation>Dèbit</translation> </message> <message> <location line="-39"/> <source>Transaction fee</source> <translation>Comissió de transacció</translation> </message> <message> <location line="+16"/> <source>Net amount</source> <translation>Quantitat neta</translation> </message> <message> <location line="+6"/> 
<source>Message</source> <translation>Missatge</translation> </message> <message> <location line="+2"/> <source>Comment</source> <translation>Comentari</translation> </message> <message> <location line="+2"/> <source>Transaction ID</source> <translation>ID de transacció</translation> </message> <message> <location line="+3"/> <source>Generated coins must mature 120 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to &quot;not accepted&quot; and it won&apos;t be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source> <translation>Les monedes generades han de madurar 120 blocs abans de poder ser gastades. Quan has generat aquest bloc, aquest ha estat transmès a la xarxa per a ser afegit a la cadena de blocs. Si no arriba a ser acceptat a la cadena, el seu estat passarà a &quot;no acceptat&quot; i no podrà ser gastat. Això pot ocórrer ocasionalment si un altre node genera un bloc a pocs segons del teu.</translation> </message> <message> <location line="+7"/> <source>Debug information</source> <translation>Informació de debug</translation> </message> <message> <location line="+8"/> <source>Transaction</source> <translation>Transacció</translation> </message> <message> <location line="+3"/> <source>Inputs</source> <translation>Entrades</translation> </message> <message> <location line="+23"/> <source>Amount</source> <translation>Quantitat</translation> </message> <message> <location line="+1"/> <source>true</source> <translation>cert</translation> </message> <message> <location line="+0"/> <source>false</source> <translation>fals</translation> </message> <message> <location line="-209"/> <source>, has not been successfully broadcast yet</source> <translation>, encara no ha estat emès correctament</translation> </message> <message numerus="yes"> <location line="-35"/> <source>Open for %n more block(s)</source> <translation><numerusform>Obert durant %n bloc més</numerusform><numerusform>Obert durant %n blocs més</numerusform></translation> </message> <message> <location line="+70"/> <source>unknown</source> <translation>desconegut</translation> </message> </context> <context> <name>TransactionDescDialog</name> <message> <location filename="../forms/transactiondescdialog.ui" line="+14"/> <source>Transaction details</source> <translation>Detalls de la transacció</translation> </message> <message> <location line="+6"/> <source>This pane shows a detailed description of the transaction</source> <translation>Aquest panell mostra una descripció detallada de la transacció</translation> </message> </context> <context> <name>TransactionTableModel</name> <message> <location filename="../transactiontablemodel.cpp" line="+225"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+0"/> <source>Type</source> <translation>Tipus</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>Adreça</translation> </message> <message> <location line="+0"/> <source>Amount</source> <translation>Quantitat</translation> </message> <message numerus="yes"> <location line="+57"/> <source>Open for %n more block(s)</source> <translation><numerusform>Obert durant %n bloc més</numerusform><numerusform>Obert durant %n blocs més</numerusform></translation> </message> <message> <location line="+3"/> <source>Open until %1</source> <translation>Obert fins %1</translation> </message> 
<message> <location line="+3"/> <source>Offline (%1 confirmations)</source> <translation>Sense connexió (%1 confirmacions)</translation> </message> <message> <location line="+3"/> <source>Unconfirmed (%1 of %2 confirmations)</source> <translation>Sense confirmar (%1 de %2 confirmacions)</translation> </message> <message> <location line="+3"/> <source>Confirmed (%1 confirmations)</source> <translation>Confirmat (%1 confirmacions)</translation> </message> <message numerus="yes"> <location line="+8"/> <source>Mined balance will be available when it matures in %n more block(s)</source> <translation><numerusform>El saldo recent minat estarà disponible quan venci el termini en %n bloc més</numerusform><numerusform>El saldo recent minat estarà disponible quan venci el termini en %n blocs més</numerusform></translation> </message> <message> <location line="+5"/> <source>This block was not received by any other nodes and will probably not be accepted!</source> <translation>Aquest bloc no ha estat rebut per cap altre node i probablement no serà acceptat!</translation> </message> <message> <location line="+3"/> <source>Generated but not accepted</source> <translation>Generat però no acceptat</translation> </message> <message> <location line="+43"/> <source>Received with</source> <translation>Rebut amb</translation> </message> <message> <location line="+2"/> <source>Received from</source> <translation>Rebut de</translation> </message> <message> <location line="+3"/> <source>Sent to</source> <translation>Enviat a</translation> </message> <message> <location line="+2"/> <source>Payment to yourself</source> <translation>Pagament a un mateix</translation> </message> <message> <location line="+2"/> <source>Mined</source> <translation>Minat</translation> </message> <message> <location line="+38"/> <source>(n/a)</source> <translation>(n/a)</translation> </message> <message> <location line="+199"/> <source>Transaction status. Hover over this field to show number of confirmations.</source> <translation>Estat de la transacció. 
Desplaça&apos;t per aquí sobre per mostrar el nombre de confirmacions.</translation> </message> <message> <location line="+2"/> <source>Date and time that the transaction was received.</source> <translation>Data i hora en que la transacció va ser rebuda.</translation> </message> <message> <location line="+2"/> <source>Type of transaction.</source> <translation>Tipus de transacció.</translation> </message> <message> <location line="+2"/> <source>Destination address of transaction.</source> <translation>Adreça del destinatari de la transacció.</translation> </message> <message> <location line="+2"/> <source>Amount removed from or added to balance.</source> <translation>Quantitat extreta o afegida del balanç.</translation> </message> </context> <context> <name>TransactionView</name> <message> <location filename="../transactionview.cpp" line="+52"/> <location line="+16"/> <source>All</source> <translation>Tot</translation> </message> <message> <location line="-15"/> <source>Today</source> <translation>Avui</translation> </message> <message> <location line="+1"/> <source>This week</source> <translation>Aquesta setmana</translation> </message> <message> <location line="+1"/> <source>This month</source> <translation>Aquest mes</translation> </message> <message> <location line="+1"/> <source>Last month</source> <translation>El mes passat</translation> </message> <message> <location line="+1"/> <source>This year</source> <translation>Enguany</translation> </message> <message> <location line="+1"/> <source>Range...</source> <translation>Rang...</translation> </message> <message> <location line="+11"/> <source>Received with</source> <translation>Rebut amb</translation> </message> <message> <location line="+2"/> <source>Sent to</source> <translation>Enviat a</translation> </message> <message> <location line="+2"/> <source>To yourself</source> <translation>A tu mateix</translation> </message> <message> <location line="+1"/> <source>Mined</source> <translation>Minat</translation> </message> <message> <location line="+1"/> <source>Other</source> <translation>Altres</translation> </message> <message> <location line="+7"/> <source>Enter address or label to search</source> <translation>Introdueix una adreça o una etiqueta per cercar</translation> </message> <message> <location line="+7"/> <source>Min amount</source> <translation>Quantitat mínima</translation> </message> <message> <location line="+34"/> <source>Copy address</source> <translation>Copiar adreça </translation> </message> <message> <location line="+1"/> <source>Copy label</source> <translation>Copiar etiqueta</translation><|fim▁hole|> <location line="+1"/> <source>Copy amount</source> <translation>Copiar quantitat</translation> </message> <message> <location line="+1"/> <source>Copy transaction ID</source> <translation>Copiar ID de transacció</translation> </message> <message> <location line="+1"/> <source>Edit label</source> <translation>Editar etiqueta</translation> </message> <message> <location line="+1"/> <source>Show transaction details</source> <translation>Mostra detalls de la transacció</translation> </message> <message> <location line="+139"/> <source>Export Transaction Data</source> <translation>Exportar detalls de la transacció </translation> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>Arxiu de separació per comes (*.csv)</translation> </message> <message> <location line="+8"/> <source>Confirmed</source> <translation>Confirmat</translation> </message> <message> <location 
line="+1"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+1"/> <source>Type</source> <translation>Tipus</translation> </message> <message> <location line="+1"/> <source>Label</source> <translation>Etiqueta</translation> </message> <message> <location line="+1"/> <source>Address</source> <translation>Direcció</translation> </message> <message> <location line="+1"/> <source>Amount</source> <translation>Quantitat</translation> </message> <message> <location line="+1"/> <source>ID</source> <translation>ID</translation> </message> <message> <location line="+4"/> <source>Error exporting</source> <translation>Error en l&apos;exportació</translation> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation>No s&apos;ha pogut escriure a l&apos;arxiu %1.</translation> </message> <message> <location line="+100"/> <source>Range:</source> <translation>Rang:</translation> </message> <message> <location line="+8"/> <source>to</source> <translation>a</translation> </message> </context> <context> <name>WalletModel</name> <message> <location filename="../walletmodel.cpp" line="+193"/> <source>Send Coins</source> <translation>Enviar monedes</translation> </message> </context> <context> <name>WalletView</name> <message> <location filename="../walletview.cpp" line="+42"/> <source>&amp;Export</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Export the data in the current tab to a file</source> <translation type="unfinished"/> </message> <message> <location line="+193"/> <source>Backup Wallet</source> <translation>Realitzar còpia de seguretat del moneder</translation> </message> <message> <location line="+0"/> <source>Wallet Data (*.dat)</source> <translation>Dades del moneder (*.dat)</translation> </message> <message> <location line="+3"/> <source>Backup Failed</source> <translation>Còpia de seguretat faillida</translation> </message> <message> <location line="+0"/> <source>There was an error trying to save the wallet data to the new location.</source> <translation>Hi ha hagut un error intentant desar les dades del moneder al nou directori</translation> </message> <message> <location line="+4"/> <source>Backup Successful</source> <translation>Copia de seguretat realitzada correctament</translation> </message> <message> <location line="+0"/> <source>The wallet data was successfully saved to the new location.</source> <translation>Les dades del moneder han estat desades cirrectament al nou emplaçament.</translation> </message> </context> <context> <name>bitcoin-core</name> <message> <location filename="../bitcoinstrings.cpp" line="+94"/> <source>Dogecoin version</source> <translation>Versió de Dogecoin</translation> </message> <message> <location line="+102"/> <source>Usage:</source> <translation>Ús:</translation> </message> <message> <location line="-29"/> <source>Send command to -server or dogecoind</source> <translation>Enviar comanda a -servidor o dogecoind</translation> </message> <message> <location line="-23"/> <source>List commands</source> <translation>Llista d&apos;ordres</translation> </message> <message> <location line="-12"/> <source>Get help for a command</source> <translation>Obtenir ajuda per a un ordre.</translation> </message> <message> <location line="+24"/> <source>Options:</source> <translation>Opcions:</translation> </message> <message> <location line="+24"/> <source>Specify configuration file (default: dogecoin.conf)</source> <translation>Especificat arxiu de 
configuració (per defecte: dogecoin.conf)</translation> </message> <message> <location line="+3"/> <source>Specify pid file (default: dogecoind.pid)</source> <translation>Especificar arxiu pid (per defecte: dogecoind.pid)</translation> </message> <message> <location line="-1"/> <source>Specify data directory</source> <translation>Especificar directori de dades</translation> </message> <message> <location line="-9"/> <source>Set database cache size in megabytes (default: 25)</source> <translation>Establir tamany de la memoria cau en megabytes (per defecte: 25)</translation> </message> <message> <location line="-28"/> <source>Listen for connections on &lt;port&gt; (default: 22556 or testnet: 44556)</source> <translation>Escoltar connexions a &lt;port&gt; (per defecte: 22556 o testnet: 44556)</translation> </message> <message> <location line="+5"/> <source>Maintain at most &lt;n&gt; connections to peers (default: 125)</source> <translation>Mantenir com a molt &lt;n&gt; connexions a peers (per defecte: 125)</translation> </message> <message> <location line="-48"/> <source>Connect to a node to retrieve peer addresses, and disconnect</source> <translation>Connectar al node per obtenir les adreces de les connexions, i desconectar</translation> </message> <message> <location line="+82"/> <source>Specify your own public address</source> <translation>Especificar la teva adreça pública</translation> </message> <message> <location line="+3"/> <source>Threshold for disconnecting misbehaving peers (default: 100)</source> <translation>Límit per a desconectar connexions errònies (per defecte: 100)</translation> </message> <message> <location line="-134"/> <source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source> <translation>Nombre de segons abans de reconectar amb connexions errònies (per defecte: 86400)</translation> </message> <message> <location line="-29"/> <source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source> <translation>Ha sorgit un error al configurar el port RPC %u escoltant a IPv4: %s</translation> </message> <message> <location line="+27"/> <source>Listen for JSON-RPC connections on &lt;port&gt; (default: 22555 or testnet: 44555)</source> <translation>Escoltar connexions JSON-RPC al port &lt;port&gt; (per defecte: 22555 o testnet:44555)</translation> </message> <message> <location line="+37"/> <source>Accept command line and JSON-RPC commands</source> <translation>Acceptar línia d&apos;ordres i ordres JSON-RPC </translation> </message> <message> <location line="+76"/> <source>Run in the background as a daemon and accept commands</source> <translation>Executar en segon pla com a programa dimoni i acceptar ordres</translation> </message> <message> <location line="+37"/> <source>Use the test network</source> <translation>Usar la xarxa de prova</translation> </message> <message> <location line="-112"/> <source>Accept connections from outside (default: 1 if no -proxy or -connect)</source> <translation>Aceptar connexions d&apos;afora (per defecte: 1 si no -proxy o -connect)</translation> </message> <message> <location line="-80"/> <source>%s, you must set a rpcpassword in the configuration file: %s It is recommended you use the following random password: rpcuser=dogecoinrpc rpcpassword=%s (you do not need to remember this password) The username and password MUST NOT be the same. If the file does not exist, create it with owner-readable-only file permissions. 
It is also recommended to set alertnotify so you are notified of problems; for example: alertnotify=echo %%s | mail -s &quot;Dogecoin Alert&quot; [email protected] </source> <translation>%s has de establir una contrasenya RPC a l&apos;arxiu de configuració:\n%s\nEs recomana que useu la següent constrasenya aleatòria:\nrpcuser=dogecoinrpc\nrpcpassword=%s\n(no necesiteu recordar aquesta contrsenya)\nEl nom d&apos;usuari i contrasenya NO HAN de ser els mateixos.\nSi l&apos;arxiu no existeix, crea&apos;l amb els permisos d&apos;arxiu de només lectura per al propietari.\nTambé es recomana establir la notificació d&apos;alertes i així seràs notificat de les incidències;\nper exemple: alertnotify=echo %%s | mail -s &quot;Dogecoin Alert&quot; [email protected]</translation> </message> <message> <location line="+17"/> <source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source> <translation>Ha sorgit un error al configurar el port RPC %u escoltant a IPv6, retrocedint a IPv4: %s</translation> </message> <message> <location line="+3"/> <source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source> <translation>Vincular a una adreça específica i sempre escoltar-hi. Utilitza la notació [host]:port per IPv6</translation> </message> <message> <location line="+3"/> <source>Cannot obtain a lock on data directory %s. Dogecoin is probably already running.</source> <translation>No es pot bloquejar el directori de dades %s. Probablement Dogecoin ja estigui en execució.</translation> </message> <message> <location line="+3"/> <source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation>Error: La transacció ha estat rebutjada. 
Això pot passar si alguna de les monedes del teu moneder ja s&apos;han gastat, com si haguesis usat una copia de l&apos;arxiu wallet.dat i s&apos;haguessin gastat monedes de la copia però sense marcar com gastades en aquest.</translation> </message> <message> <location line="+4"/> <source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source> <translation>Error: Aquesta transacció requereix una comissió d&apos;almenys %s degut al seu import, complexitat o per l&apos;ús de fons recentment rebuts!</translation> </message> <message> <location line="+3"/> <source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source> <translation>Executar ordre al rebre una alerta rellevant (%s al cmd es reemplaça per message)</translation> </message> <message> <location line="+3"/> <source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source> <translation>Executar una ordre quan una transacció del moneder canviï (%s in cmd es canvia per TxID)</translation> </message> <message> <location line="+11"/> <source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source> <translation>Establir una mida màxima de transaccions d&apos;alta prioritat/baixa comisió en bytes (per defecte: 27000)</translation> </message> <message> <location line="+6"/> <source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source> <translation>Aquesta és una versió de pre-llançament - utilitza-la sota la teva responsabilitat - No usar per a minería o aplicacions de compra-venda</translation> </message> <message> <location line="+5"/> <source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source> <translation>Advertència: el -paytxfee és molt elevat! Aquesta és la comissió de transacció que pagaràs quan enviis una transacció.</translation> </message> <message> <location line="+3"/> <source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source> <translation>Advertència: Les transaccions mostrades poden no ser correctes! Pot esser que necessitis actualitzar, o bé que altres nodes ho necessitin.</translation> </message> <message> <location line="+3"/> <source>Warning: Please check that your computer&apos;s date and time are correct! If your clock is wrong Dogecoin will not work properly.</source> <translation>Advertència: Si us plau comprovi que la data i hora del seu computador siguin correctes! Si el seu rellotge està mal configurat, Dogecoin no funcionará de manera apropiada.</translation> </message> <message> <location line="+3"/> <source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source> <translation>Advertència: Error llegint l&apos;arxiu wallet.dat!! Totes les claus es llegeixen correctament, però hi ha dades de transaccions o entrades del llibre d&apos;adreces absents o bé son incorrectes.</translation> </message> <message> <location line="+3"/> <source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source> <translation>Advertència: L&apos;arxiu wallet.dat és corrupte, dades rescatades! 
L&apos;arxiu wallet.dat original ha estat desat com wallet.{estampa_temporal}.bak al directori %s; si el teu balanç o transaccions son incorrectes hauries de restaurar-lo de un backup.</translation> </message> <message> <location line="+14"/> <source>Attempt to recover private keys from a corrupt wallet.dat</source> <translation>Intentar recuperar les claus privades d&apos;un arxiu wallet.dat corrupte</translation> </message> <message> <location line="+2"/> <source>Block creation options:</source> <translation>Opcions de la creació de blocs:</translation> </message> <message> <location line="+5"/> <source>Connect only to the specified node(s)</source> <translation>Connectar només al(s) node(s) especificats</translation> </message> <message> <location line="+3"/> <source>Corrupted block database detected</source> <translation>S&apos;ha detectat una base de dades de blocs corrupta</translation> </message> <message> <location line="+1"/> <source>Discover own IP address (default: 1 when listening and no -externalip)</source> <translation>Descobrir la pròpia adreça IP (per defecte: 1 quan escoltant i no -externalip)</translation> </message> <message> <location line="+1"/> <source>Do you want to rebuild the block database now?</source> <translation>Vols reconstruir la base de dades de blocs ara?</translation> </message> <message> <location line="+2"/> <source>Error initializing block database</source> <translation>Error carregant la base de dades de blocs</translation> </message> <message> <location line="+1"/> <source>Error initializing wallet database environment %s!</source> <translation>Error inicialitzant l&apos;entorn de la base de dades del moneder %s!</translation> </message> <message> <location line="+1"/> <source>Error loading block database</source> <translation>Error carregant la base de dades del bloc</translation> </message> <message> <location line="+4"/> <source>Error opening block database</source> <translation>Error obrint la base de dades de blocs</translation> </message> <message> <location line="+2"/> <source>Error: Disk space is low!</source> <translation>Error: Espai al disc baix!</translation> </message> <message> <location line="+1"/> <source>Error: Wallet locked, unable to create transaction!</source> <translation>Error: El moneder està blocat, no és possible crear la transacció!</translation> </message> <message> <location line="+1"/> <source>Error: system error: </source> <translation>Error: error de sistema:</translation> </message> <message> <location line="+1"/> <source>Failed to listen on any port. Use -listen=0 if you want this.</source> <translation>Error al escoltar a qualsevol port. 
Utilitza -listen=0 si vols això.</translation> </message> <message> <location line="+1"/> <source>Failed to read block info</source> <translation>Ha fallat la lectura de la informació del bloc</translation> </message> <message> <location line="+1"/> <source>Failed to read block</source> <translation>Ha fallat la lectura del bloc</translation> </message> <message> <location line="+1"/> <source>Failed to sync block index</source> <translation>Ha fallat la sincronització de l&apos;índex de blocs</translation> </message> <message> <location line="+1"/> <source>Failed to write block index</source> <translation>Ha fallat l&apos;escriptura de l&apos;índex de blocs</translation> </message> <message> <location line="+1"/> <source>Failed to write block info</source> <translation>Ha fallat l&apos;escriptura de la informació de bloc</translation> </message> <message> <location line="+1"/> <source>Failed to write block</source> <translation>Ha fallat l&apos;escriptura del bloc</translation> </message> <message> <location line="+1"/> <source>Failed to write file info</source> <translation>Ha fallat l&apos;escriptura de la informació d&apos;arxiu</translation> </message> <message> <location line="+1"/> <source>Failed to write to coin database</source> <translation>Ha fallat l&apos;escriptura de la base de dades de monedes</translation> </message> <message> <location line="+1"/> <source>Failed to write transaction index</source> <translation>Ha fallat l&apos;escriptura de l&apos;índex de transaccions</translation> </message> <message> <location line="+1"/> <source>Failed to write undo data</source> <translation>Ha fallat l&apos;escriptura de les dades de desfer</translation> </message> <message> <location line="+2"/> <source>Find peers using DNS lookup (default: 1 unless -connect)</source> <translation>Cerca punts de connexió usant rastreig de DNS (per defecte: 1 tret d&apos;usar -connect)</translation> </message> <message> <location line="+1"/> <source>Generate coins (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>How many blocks to check at startup (default: 288, 0 = all)</source> <translation>Quants blocs s&apos;han de comprovar a l&apos;inici (per defecte: 288, 0 = tots)</translation> </message> <message> <location line="+1"/> <source>How thorough the block verification is (0-4, default: 3)</source> <translation>Com d&apos;exhaustiva és la verificació de blocs (0-4, per defecte: 3)</translation> </message> <message> <location line="+19"/> <source>Not enough file descriptors available.</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Rebuild block chain index from current blk000??.dat files</source> <translation>Reconstruir l&apos;índex de la cadena de blocs dels arxius actuals blk000??.dat</translation> </message> <message> <location line="+16"/> <source>Set the number of threads to service RPC calls (default: 4)</source> <translation>Estableix el nombre de fils per atendre trucades RPC (per defecte: 4)</translation> </message> <message> <location line="+26"/> <source>Verifying blocks...</source> <translation>Verificant blocs...</translation> </message> <message> <location line="+1"/> <source>Verifying wallet...</source> <translation>Verificant moneder...</translation> </message> <message> <location line="-69"/> <source>Imports blocks from external blk000??.dat file</source> <translation>Importa blocs d&apos;un fitxer blk000??.dat extern</translation> </message> <message> <location line="-76"/> <source>Set the number of script verification threads (up to 16, 0 = auto, 
&lt;0 = leave that many cores free, default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+77"/> <source>Information</source> <translation>Informació</translation> </message> <message> <location line="+3"/> <source>Invalid -tor address: &apos;%s&apos;</source> <translation>Adreça -tor invàlida: &apos;%s&apos;</translation> </message> <message> <location line="+1"/> <source>Invalid amount for -minrelaytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Invalid amount for -mintxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Maintain a full transaction index (default: 0)</source> <translation>Mantenir un índex complet de transaccions (per defecte: 0)</translation> </message> <message> <location line="+2"/> <source>Maximum per-connection receive buffer, &lt;n&gt;*1000 bytes (default: 5000)</source> <translation>Mida màxima del buffer de recepció per a cada connexió, &lt;n&gt;*1000 bytes (per defecte: 5000)</translation> </message> <message> <location line="+1"/> <source>Maximum per-connection send buffer, &lt;n&gt;*1000 bytes (default: 1000)</source> <translation>Mida màxima del buffer d&apos;enviament per a cada connexió, &lt;n&gt;*1000 bytes (per defecte: 1000)</translation> </message> <message> <location line="+2"/> <source>Only accept block chain matching built-in checkpoints (default: 1)</source> <translation>Tan sols acceptar cadenes de blocs que coincideixin amb els punts de control integrats (per defecte: 1)</translation> </message> <message> <location line="+1"/> <source>Only connect to nodes in network &lt;net&gt; (IPv4, IPv6 or Tor)</source> <translation>Només connectar als nodes de la xarxa &lt;net&gt; (IPv4, IPv6 o Tor)</translation> </message> <message> <location line="+2"/> <source>Output extra debugging information. Implies all other -debug* options</source> <translation>Sortida de la informació extra de debugging. 
Implica totes les altres opcions -debug*</translation> </message> <message> <location line="+1"/> <source>Output extra network debugging information</source> <translation>Sortida de la informació extra de debugging de xarxa</translation> </message> <message> <location line="+2"/> <source>Prepend debug output with timestamp (default: 1)</source> <translation>Anteposar estampa temporal a les dades de debug (per defecte: 1)</translation> </message> <message> <location line="+5"/> <source>SSL options: (see the DogeCoin Wiki for SSL setup instructions)</source> <translation>Opcions SSL: (veure la Wiki de Dogecoin per a instruccions de configuració SSL)</translation> </message> <message> <location line="+1"/> <source>Select the version of socks proxy to use (4-5, default: 5)</source> <translation>Selecciona la versió de socks proxy a utilitzar (4-5, per defecte: 5)</translation> </message> <message> <location line="+3"/> <source>Send trace/debug info to console instead of debug.log file</source> <translation>Enviar informació de traça/debug a la consola en comptes de l&apos;arxiu debug.log</translation> </message> <message> <location line="+1"/> <source>Send trace/debug info to debugger</source> <translation>Enviar informació de traça/debug a un debugger</translation> </message> <message> <location line="+5"/> <source>Set maximum block size in bytes (default: 250000)</source> <translation>Establir una mida màxima de bloc en bytes (per defecte: 250000)</translation> </message> <message> <location line="+1"/> <source>Set minimum block size in bytes (default: 0)</source> <translation>Establir una mida mínima de bloc en bytes (per defecte: 0)</translation> </message> <message> <location line="+2"/> <source>Shrink debug.log file on client startup (default: 1 when no -debug)</source> <translation>Reduir l&apos;arxiu debug.log en iniciar el client (per defecte: 1 quan no -debug)</translation> </message> <message> <location line="+1"/> <source>Signing transaction failed</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Specify connection timeout in milliseconds (default: 5000)</source> <translation>Especificar el temps límit per a un intent de connexió en mil·lisegons (per defecte: 5000)</translation> </message> <message> <location line="+4"/> <source>System error: </source> <translation>Error de sistema: </translation> </message> <message> <location line="+4"/> <source>Transaction amount too small</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Transaction amounts must be positive</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Transaction too large</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Use UPnP to map the listening port (default: 0)</source> <translation>Utilitza UPnP per a mapejar els ports d&apos;escolta (per defecte: 0)</translation> </message> <message> <location line="+1"/> <source>Use UPnP to map the listening port (default: 1 when listening)</source> <translation>Utilitza UPnP per a mapejar els ports d&apos;escolta (per defecte: 1 quan s&apos;escolta)</translation> </message> <message> <location line="+1"/> <source>Use proxy to reach tor hidden services (default: same as -proxy)</source> <translation>Utilitzar proxy per arribar als serveis tor amagats (per defecte: el mateix que -proxy)</translation> </message> <message> <location line="+2"/> <source>Username for JSON-RPC connections</source> <translation>Nom d&apos;usuari per a 
connexions JSON-RPC</translation> </message> <message> <location line="+4"/> <source>Warning</source> <translation>Avís</translation> </message> <message> <location line="+1"/> <source>Warning: This version is obsolete, upgrade required!</source> <translation>Advertència: Aquesta versió està obsoleta, és necessari actualitzar!</translation> </message> <message> <location line="+1"/> <source>You need to rebuild the databases using -reindex to change -txindex</source> <translation>Necessiteu reconstruir les bases de dades usant -reindex per canviar -txindex</translation> </message> <message> <location line="+1"/> <source>wallet.dat corrupt, salvage failed</source> <translation>L&apos;arxiu wallet.dat és corrupte, el rescat de les dades ha fallat</translation> </message> <message> <location line="-50"/> <source>Password for JSON-RPC connections</source> <translation>Contrasenya per a connexions JSON-RPC</translation> </message> <message> <location line="-67"/> <source>Allow JSON-RPC connections from specified IP address</source> <translation>Permetre connexions JSON-RPC d&apos;adreces IP específiques</translation> </message> <message> <location line="+76"/> <source>Send commands to node running on &lt;ip&gt; (default: 127.0.0.1)</source> <translation>Enviar ordre al node en execució a &lt;ip&gt; (per defecte: 127.0.0.1)</translation> </message> <message> <location line="-120"/> <source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source> <translation>Executar ordre quan el millor bloc canviï (%s al cmd es reemplaça pel hash del bloc)</translation> </message> <message> <location line="+147"/> <source>Upgrade wallet to latest format</source> <translation>Actualitzar moneder a l&apos;últim format</translation> </message> <message> <location line="-21"/> <source>Set key pool size to &lt;n&gt; (default: 100)</source> <translation>Establir límit de nombre de claus a &lt;n&gt; (per defecte: 100)</translation> </message> <message> <location line="-12"/> <source>Rescan the block chain for missing wallet transactions</source> <translation>Re-escanejar cadena de blocs en cerca de transaccions de moneder perdudes</translation> </message> <message> <location line="+35"/> <source>Use OpenSSL (https) for JSON-RPC connections</source> <translation>Utilitzar OpenSSL (https) per a connexions JSON-RPC</translation> </message> <message> <location line="-26"/> <source>Server certificate file (default: server.cert)</source> <translation>Arxiu del certificat de servidor (per defecte: server.cert)</translation> </message> <message> <location line="+1"/> <source>Server private key (default: server.pem)</source> <translation>Clau privada del servidor (per defecte: server.pem)</translation> </message> <message> <location line="-151"/> <source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source> <translation>Xifrats acceptats (per defecte: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</translation> </message> <message> <location line="+165"/> <source>This help message</source> <translation>Aquest missatge d&apos;ajuda</translation> </message> <message> <location line="+6"/> <source>Unable to bind to %s on this computer (bind returned error %d, %s)</source> <translation>Impossible d&apos;unir %s a aquest ordinador (s&apos;ha retornat l&apos;error %d, %s)</translation> </message> <message> <location line="-91"/> <source>Connect through socks proxy</source> <translation>Connectar a través de socks proxy</translation> </message> <message> 
<location line="-10"/> <source>Allow DNS lookups for -addnode, -seednode and -connect</source> <translation>Permetre consultes DNS per a -addnode, -seednode i -connect</translation> </message> <message> <location line="+55"/> <source>Loading addresses...</source> <translation>Carregant adreces...</translation> </message> <message> <location line="-35"/> <source>Error loading wallet.dat: Wallet corrupted</source> <translation>Error carregant wallet.dat: Moneder corrupte</translation> </message> <message> <location line="+1"/> <source>Error loading wallet.dat: Wallet requires newer version of Dogecoin</source> <translation>Error carregant wallet.dat: El moneder requereix una versió de Dogecoin més moderna</translation> </message> <message> <location line="+93"/> <source>Wallet needed to be rewritten: restart Dogecoin to complete</source> <translation>El moneder necesita ser re-escrit: re-inicia Dogecoin per a completar la tasca</translation> </message> <message> <location line="-95"/> <source>Error loading wallet.dat</source> <translation>Error carregant wallet.dat</translation> </message> <message> <location line="+28"/> <source>Invalid -proxy address: &apos;%s&apos;</source> <translation>Adreça -proxy invalida: &apos;%s&apos;</translation> </message> <message> <location line="+56"/> <source>Unknown network specified in -onlynet: &apos;%s&apos;</source> <translation>Xarxa desconeguda especificada a -onlynet: &apos;%s&apos;</translation> </message> <message> <location line="-1"/> <source>Unknown -socks proxy version requested: %i</source> <translation>S&apos;ha demanat una versió desconeguda de -socks proxy: %i</translation> </message> <message> <location line="-96"/> <source>Cannot resolve -bind address: &apos;%s&apos;</source> <translation>No es pot resoldre l&apos;adreça -bind: &apos;%s&apos;</translation> </message> <message> <location line="+1"/> <source>Cannot resolve -externalip address: &apos;%s&apos;</source> <translation>No es pot resoldre l&apos;adreça -externalip: &apos;%s&apos;</translation> </message> <message> <location line="+44"/> <source>Invalid amount for -paytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation>Quantitat invalida per a -paytxfee=&lt;amount&gt;: &apos;%s&apos;</translation> </message> <message> <location line="+1"/> <source>Invalid amount</source> <translation>Quanitat invalida</translation> </message> <message> <location line="-6"/> <source>Insufficient funds</source> <translation>Balanç insuficient</translation> </message> <message> <location line="+10"/> <source>Loading block index...</source> <translation>Carregant índex de blocs...</translation> </message> <message> <location line="-57"/> <source>Add a node to connect to and attempt to keep the connection open</source> <translation>Afegir un node per a connectar&apos;s-hi i intentar mantenir la connexió oberta</translation> </message> <message> <location line="-25"/> <source>Unable to bind to %s on this computer. Dogecoin is probably already running.</source> <translation>Impossible d&apos;unir %s en aquest ordinador. 
Probablement Dogecoin ja estigui en execució.</translation> </message> <message> <location line="+64"/> <source>Fee per KB to add to transactions you send</source> <translation>Comissió a afegir per cada KB de transaccions que enviïs</translation> </message> <message> <location line="+19"/> <source>Loading wallet...</source> <translation>Carregant moneder...</translation> </message> <message> <location line="-52"/> <source>Cannot downgrade wallet</source> <translation>No es pot reduir la versió del moneder</translation> </message> <message> <location line="+3"/> <source>Cannot write default address</source> <translation>No es pot escriure l&apos;adreça per defecte</translation> </message> <message> <location line="+64"/> <source>Rescanning...</source> <translation>Re-escanejant...</translation> </message> <message> <location line="-57"/> <source>Done loading</source> <translation>Càrrega acabada</translation> </message> <message> <location line="+82"/> <source>To use the %s option</source> <translation>Per utilitzar l&apos;opció %s</translation> </message> <message> <location line="-74"/> <source>Error</source> <translation>Error</translation> </message> <message> <location line="-31"/> <source>You must set rpcpassword=&lt;password&gt; in the configuration file: %s If the file does not exist, create it with owner-readable-only file permissions.</source> <translation>Has de configurar el rpcpassword=&lt;password&gt; a l&apos;arxiu de configuració:\n %s\n Si l&apos;arxiu no existeix, crea&apos;l amb permisos owner-readable-only.</translation> </message> </context> </TS><|fim▁end|>
</message> <message>
<|file_name|>uritests.cpp<|end_file_name|><|fim▁begin|>// Copyright (c) 2009-2014 The nealcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "uritests.h" #include "guiutil.h" #include "walletmodel.h" #include <QUrl> void URITests::uriTests() { SendCoinsRecipient rv; QUrl uri; uri.setUrl(QString("nealcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?req-dontexist=")); QVERIFY(!GUIUtil::parsenealcoinURI(uri, &rv)); uri.setUrl(QString("nealcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?dontexist=")); QVERIFY(GUIUtil::parsenealcoinURI(uri, &rv)); QVERIFY(rv.address == QString("175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W")); QVERIFY(rv.label == QString()); QVERIFY(rv.amount == 0); uri.setUrl(QString("nealcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?label=Wikipedia Example Address")); QVERIFY(GUIUtil::parsenealcoinURI(uri, &rv)); QVERIFY(rv.address == QString("175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W")); QVERIFY(rv.label == QString("Wikipedia Example Address")); QVERIFY(rv.amount == 0); uri.setUrl(QString("nealcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=0.001")); QVERIFY(GUIUtil::parsenealcoinURI(uri, &rv)); QVERIFY(rv.address == QString("175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W")); QVERIFY(rv.label == QString()); QVERIFY(rv.amount == 100000); uri.setUrl(QString("nealcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=1.001")); QVERIFY(GUIUtil::parsenealcoinURI(uri, &rv)); QVERIFY(rv.address == QString("175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W")); QVERIFY(rv.label == QString()); QVERIFY(rv.amount == 100100000); uri.setUrl(QString("nealcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=100&label=Wikipedia Example")); QVERIFY(GUIUtil::parsenealcoinURI(uri, &rv)); QVERIFY(rv.address == QString("175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W")); QVERIFY(rv.amount == 10000000000LL); QVERIFY(rv.label == QString("Wikipedia Example")); uri.setUrl(QString("nealcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?message=Wikipedia Example Address")); QVERIFY(GUIUtil::parsenealcoinURI(uri, &rv)); QVERIFY(rv.address == QString("175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W")); QVERIFY(rv.label == QString()); QVERIFY(GUIUtil::parsenealcoinURI("nealcoin://175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?message=Wikipedia Example Address", &rv)); QVERIFY(rv.address == QString("175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W")); QVERIFY(rv.label == QString()); uri.setUrl(QString("nealcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?req-message=Wikipedia Example Address")); QVERIFY(GUIUtil::parsenealcoinURI(uri, &rv)); uri.setUrl(QString("nealcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=1,000&label=Wikipedia Example")); QVERIFY(!GUIUtil::parsenealcoinURI(uri, &rv));<|fim▁hole|> QVERIFY(!GUIUtil::parsenealcoinURI(uri, &rv)); }<|fim▁end|>
uri.setUrl(QString("nealcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=1,000.0&label=Wikipedia Example"));
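For reference, the amount assertions in this test imply a 10^8 base-unit factor (0.001 parses to 100000) and a hard failure on unknown req-* keys, while unknown optional keys are ignored. A rough Python sketch of that parsing contract follows; the helper name and the COIN constant are illustrative assumptions, not the project's actual implementation:

from decimal import Decimal, InvalidOperation
from urllib.parse import urlsplit, parse_qsl

COIN = 10 ** 8  # assumed from the 0.001 -> 100000 assertion above

def parse_payment_uri(uri, scheme="nealcoin"):
    # Unknown "req-" parameters fail the parse; unknown optional ones are
    # ignored; digit-grouping commas in amounts are rejected.
    parts = urlsplit(uri)
    if parts.scheme != scheme:
        return None
    result = {"address": parts.path or parts.netloc, "label": "", "amount": 0}
    for key, value in parse_qsl(parts.query, keep_blank_values=True):
        if key.startswith("req-"):
            if key[4:] not in ("amount", "label", "message"):
                return None  # required-but-unsupported parameter
            key = key[4:]
        if key == "amount":
            try:
                result["amount"] = int(Decimal(value) * COIN)
            except InvalidOperation:
                return None  # e.g. "1,000" or "1,000.0"
        elif key in ("label", "message"):
            result[key] = value
    return result

assert parse_payment_uri("nealcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=0.001")["amount"] == 100000
assert parse_payment_uri("nealcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?req-dontexist=") is None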
<|file_name|>NamedRange.java<|end_file_name|><|fim▁begin|>/******************************************************************************* * Copyright (c) 2009-2012, University of Manchester * * Licensed under the New BSD License. * Please see LICENSE file that is distributed with the source code ******************************************************************************/ package uk.ac.manchester.cs.owl.semspreadsheets.model; /** * Author: Matthew Horridge<br><|fim▁hole|> * Information Management Group<br> * Date: 08-Nov-2009 */ public interface NamedRange { String getName(); Range getRange(); }<|fim▁end|>
* The University of Manchester<br>
<|file_name|>ForgeryPyGenerateVirtualData.py<|end_file_name|><|fim▁begin|>from forgery_py import * for x in range(20): randPerson=name.first_name(),name.last_name(),personal.gender(),name.location(),address.phone() randCV=lorem_ipsum.title(),lorem_ipsum.sentence() randAddr=address.city(),address.state(),address.country(),address.continent() randEmail=internet.email_address() randColor=basic.hex_color()<|fim▁hole|> print("name: {}\n gender: {}\n home: {}\n phone: {}\n email: {}". format(randPerson[:2],randPerson[2],randPerson[3],randPerson[4],randEmail)) print(f" CV: {randCV}") print(f" favourite color: {randColor}") print(f" comment: {randComment}") print("handout date: {:#^50s}".format(str(randDate)))<|fim▁end|>
randComment=basic.text(200) randDate=date.date()
<|file_name|>ad_param_service_unittest.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: UTF-8 -*- # # Copyright 2010 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests to cover AdParamService.""" __author__ = '[email protected] (Stan Grinberg)' import os import sys sys.path.insert(0, os.path.join('..', '..', '..')) import unittest from datetime import date from adspygoogle.common import Utils from tests.adspygoogle.adwords import HTTP_PROXY from tests.adspygoogle.adwords import SERVER_V201109 from tests.adspygoogle.adwords import TEST_VERSION_V201109 from tests.adspygoogle.adwords import VERSION_V201109 from tests.adspygoogle.adwords import client class AdParamServiceTestV201109(unittest.TestCase): """Unittest suite for AdParamService using v201109.""" SERVER = SERVER_V201109 VERSION = VERSION_V201109 client.debug = False service = None ad_group_id = '0' text_ad_id = '0' criterion_id = '0' has_param = False def setUp(self): """Prepare unittest.""" print self.id() if not self.__class__.service: self.__class__.service = client.GetAdParamService( self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY) if (self.__class__.ad_group_id == '0' or self.__class__.text_ad_id == '0' or self.__class__.criterion_id == '0'): campaign_service = client.GetCampaignService( self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY) operations = [{ 'operator': 'ADD', 'operand': { 'name': 'Campaign #%s' % Utils.GetUniqueName(), 'status': 'PAUSED', 'biddingStrategy': { 'xsi_type': 'ManualCPC' }, 'endDate': date(date.today().year + 1, 12, 31).strftime('%Y%m%d'), 'budget': { 'period': 'DAILY', 'amount': { 'microAmount': '2000000' }, 'deliveryMethod': 'STANDARD' } } }] campaign_id = campaign_service.Mutate(operations)[0]['value'][0]['id'] ad_group_service = client.GetAdGroupService( self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY) operations = [{ 'operator': 'ADD', 'operand': { 'campaignId': campaign_id, 'name': 'AdGroup #%s' % Utils.GetUniqueName(), 'status': 'ENABLED', 'bids': { 'xsi_type': 'ManualCPCAdGroupBids', 'keywordMaxCpc': { 'amount': { 'microAmount': '1000000' } } } } }] self.__class__.ad_group_id = ad_group_service.Mutate( operations)[0]['value'][0]['id'] ad_group_ad_service = client.GetAdGroupAdService( self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY) operations = [{ 'operator': 'ADD', 'operand': { 'xsi_type': 'AdGroupAd', 'adGroupId': self.__class__.ad_group_id, 'ad': { 'xsi_type': 'TextAd', 'url': 'http://www.example.com', 'displayUrl': 'example.com',<|fim▁hole|> 'description1': 'Good deals, only {param2:} left', 'description2': 'Low prices under {param1:}!', 'headline': 'MacBook Pro Sale' }, 'status': 'ENABLED' } }] self.__class__.text_ad_id = ad_group_ad_service.Mutate( operations)[0]['value'][0]['ad']['id'] ad_group_criterion_service = client.GetAdGroupCriterionService( self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY) operations = [{ 'operator': 'ADD', 'operand': { 'xsi_type': 'BiddableAdGroupCriterion', 'adGroupId': 
self.__class__.ad_group_id, 'criterion': { 'xsi_type': 'Keyword', 'matchType': 'BROAD', 'text': 'macbook pro' } } }] self.__class__.criterion_id = ad_group_criterion_service.Mutate( operations)[0]['value'][0]['criterion']['id'] def testGetAdParam(self): """Test whether we can fetch an existing ad param for a given ad group.""" if not self.__class__.has_param: self.testCreateAdParam() selector = { 'fields': ['AdGroupId', 'CriterionId', 'InsertionText', 'ParamIndex'], 'predicates': [{ 'field': 'AdGroupId', 'operator': 'EQUALS', 'values': [self.__class__.ad_group_id] }, { 'field': 'CriterionId', 'operator': 'EQUALS', 'values': [self.__class__.criterion_id] }] } self.assert_(isinstance(self.__class__.service.Get(selector), tuple)) def testCreateAdParam(self): """Test whether we can create a new ad param.""" operations = [ { 'operator': 'SET', 'operand': { 'adGroupId': self.__class__.ad_group_id, 'criterionId': self.__class__.criterion_id, 'insertionText': '$1,699', 'paramIndex': '1' } }, { 'operator': 'SET', 'operand': { 'adGroupId': self.__class__.ad_group_id, 'criterionId': self.__class__.criterion_id, 'insertionText': '139', 'paramIndex': '2' } } ] self.assert_(isinstance(self.__class__.service.Mutate(operations), tuple)) self.__class__.has_param = True def makeTestSuiteV201109(): """Set up test suite using v201109. Returns: TestSuite test suite using v201109. """ suite = unittest.TestSuite() suite.addTests(unittest.makeSuite(AdParamServiceTestV201109)) return suite if __name__ == '__main__': suites = [] if TEST_VERSION_V201109: suites.append(makeTestSuiteV201109()) if suites: alltests = unittest.TestSuite(suites) unittest.main(defaultTest='alltests')<|fim▁end|>
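The {param1:} and {param2:} tokens in the ad copy above are AdWords ad parameters: at serving time each placeholder is replaced by the insertion text registered for that index, falling back to the default given after the colon. A purely illustrative substitution sketch, not the API's own logic:

import re

def fill_ad_params(text, params):
    # "{param1:$999}" -> params.get(1, "$999"); the default may be empty.
    def replace(match):
        index, default = int(match.group(1)), match.group(2)
        return params.get(index, default)
    return re.sub(r"\{param(\d+):([^}]*)\}", replace, text)

assert fill_ad_params("Low prices under {param1:}!", {1: "$1,699"}) == "Low prices under $1,699!"
assert fill_ad_params("Good deals, only {param2:} left", {2: "139"}) == "Good deals, only 139 left"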
<|file_name|>user.ts<|end_file_name|><|fim▁begin|>export class User { public id: number; public username: string; public platformAdmin: boolean;<|fim▁hole|><|fim▁end|>
}
<|file_name|>0002_transcriptpage_updated_at.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Generated by Django 1.9.6 on 2016-06-16 22:52 from __future__ import unicode_literals import datetime from django.db import migrations, models from django.utils.timezone import utc class Migration(migrations.Migration): dependencies = [ ('transcripts', '0001_initial'), ]<|fim▁hole|> name='updated_at', field=models.DateTimeField(auto_now=True, default=datetime.datetime(2016, 6, 16, 22, 52, 58, 616986, tzinfo=utc)), preserve_default=False, ), ]<|fim▁end|>
operations = [ migrations.AddField( model_name='transcriptpage',
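For context, this is the shape makemigrations emits when a non-nullable DateTimeField(auto_now=True) is added to an existing model: the captured datetime is a one-off default used to backfill existing rows, and preserve_default=False discards it afterwards. The triggering model change would look roughly like the following sketch (the real model's other fields are not shown in this excerpt):

from django.db import models

class TranscriptPage(models.Model):
    # ...existing fields elided...
    updated_at = models.DateTimeField(auto_now=True)  # the added column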
<|file_name|>cmpxchg.rs<|end_file_name|><|fim▁begin|>use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode}; use ::RegType::*; use ::instruction_def::*; use ::Operand::*; use ::Reg::*; use ::RegScale::*; fn cmpxchg_1() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, operand1: Some(Direct(DL)), operand2: Some(Direct(BL)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 176, 218], OperandSize::Word) } fn cmpxchg_2() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, operand1: Some(IndirectDisplaced(BX, 2132, Some(OperandSize::Byte), None)), operand2: Some(Direct(BL)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 176, 159, 84, 8], OperandSize::Word) } fn cmpxchg_3() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, operand1: Some(Direct(DL)), operand2: Some(Direct(BL)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 176, 218], OperandSize::Dword) } fn cmpxchg_4() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, operand1: Some(IndirectScaledIndexedDisplaced(ESI, ESI, Two, 302595767, Some(OperandSize::Byte), None)), operand2: Some(Direct(BL)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 176, 156, 118, 183, 62, 9, 18], OperandSize::Dword) } fn cmpxchg_5() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, operand1: Some(Direct(DL)), operand2: Some(Direct(CL)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 176, 202], OperandSize::Qword) } <|fim▁hole|> fn cmpxchg_7() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, operand1: Some(Direct(BL)), operand2: Some(Direct(CL)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 176, 203], OperandSize::Qword) } fn cmpxchg_8() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, operand1: Some(IndirectScaledDisplaced(RSI, Eight, 725028548, Some(OperandSize::Byte), None)), operand2: Some(Direct(BL)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 176, 28, 245, 196, 14, 55, 43], OperandSize::Qword) } fn cmpxchg_9() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, operand1: Some(Direct(BP)), operand2: Some(Direct(DX)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 177, 213], OperandSize::Word) } fn cmpxchg_10() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, operand1: Some(IndirectDisplaced(SI, 4088, Some(OperandSize::Word), None)), operand2: Some(Direct(DI)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 177, 188, 248, 15], OperandSize::Word) } fn cmpxchg_11() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, operand1: Some(Direct(DX)), operand2: Some(Direct(SP)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 177, 226], OperandSize::Dword) } fn cmpxchg_12() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, 
operand1: Some(IndirectScaledDisplaced(ESI, Two, 654255617, Some(OperandSize::Word), None)), operand2: Some(Direct(DI)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 177, 60, 117, 1, 38, 255, 38], OperandSize::Dword) } fn cmpxchg_13() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, operand1: Some(Direct(DX)), operand2: Some(Direct(DX)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 177, 210], OperandSize::Qword) } fn cmpxchg_14() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, operand1: Some(Indirect(RDX, Some(OperandSize::Word), None)), operand2: Some(Direct(SP)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 177, 34], OperandSize::Qword) } fn cmpxchg_15() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, operand1: Some(Direct(EDI)), operand2: Some(Direct(EDI)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 177, 255], OperandSize::Word) } fn cmpxchg_16() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, operand1: Some(IndirectScaledIndexed(BP, SI, One, Some(OperandSize::Dword), None)), operand2: Some(Direct(ESP)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 177, 34], OperandSize::Word) } fn cmpxchg_17() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, operand1: Some(Direct(ESP)), operand2: Some(Direct(EDI)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 177, 252], OperandSize::Dword) } fn cmpxchg_18() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, operand1: Some(Indirect(ESI, Some(OperandSize::Dword), None)), operand2: Some(Direct(ECX)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 177, 14], OperandSize::Dword) } fn cmpxchg_19() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, operand1: Some(Direct(ESP)), operand2: Some(Direct(EDI)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 177, 252], OperandSize::Qword) } fn cmpxchg_20() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, operand1: Some(IndirectScaledIndexed(RSI, RCX, Four, Some(OperandSize::Dword), None)), operand2: Some(Direct(ESP)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 177, 36, 142], OperandSize::Qword) } fn cmpxchg_21() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, operand1: Some(Direct(RBX)), operand2: Some(Direct(RBP)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[72, 15, 177, 235], OperandSize::Qword) } fn cmpxchg_22() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, operand1: Some(IndirectScaledIndexed(RDI, RCX, Eight, Some(OperandSize::Qword), None)), operand2: Some(Direct(RDI)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[72, 15, 177, 60, 207], OperandSize::Qword) }<|fim▁end|>
fn cmpxchg_6() { run_test(&Instruction { mnemonic: Mnemonic::CMPXCHG, operand1: Some(IndirectScaledDisplaced(RDI, Two, 2091377662, Some(OperandSize::Byte), None)), operand2: Some(Direct(DL)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 176, 20, 125, 254, 227, 167, 124], OperandSize::Qword) }
<|file_name|>implore.es6.js<|end_file_name|><|fim▁begin|>/** * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ 'use strict'; const invariant = require('invariant'); /** * @typedef {object} request * @property {string} method * @property {string} route - string like /abc/:abc * @property {string} params * @property {object} [body] * @property {object} headers */ /** * @typedef {object} reponse * @property {Error} error - an error which occured during req or res * @property {object} body - content received from server (parsed) * @property {object} headers - set additional request headers * @property {number} status - http status code; 0 on failure */ /** * XHR wrapper for same-domain requests with Content-Type: application/json * * @param {request} request * @return {Promise} */ export default function implore(request) { return new Promise(resolve => { const response = { error: null }; invariant( request, 'implore requires a `request` argument' ); invariant( typeof request.route === 'string', 'implore requires parameter `route` to be a string' ); invariant( typeof request.method === 'string', 'implore requires parameter `method` to be a string' ); const xhr = new XMLHttpRequest(); xhr.open(request.method, getURLFromRequest(request)); switch (request.method) { case 'POST': case 'PUT': case 'PATCH': xhr.setRequestHeader('Content-Type', 'application/json'); break; } if (request.headers) { invariant( typeof request.headers === 'object', 'implore requires parameter `headers` to be an object' ); Object.keys(request.headers).forEach((header) => { xhr.setRequestHeader(header, request.headers[header]); }); } xhr.onreadystatechange = function onreadystatechange() { let responseText; if (xhr.readyState === 4) { responseText = xhr.responseText; response.status = xhr.status; response.type = xhr.getResponseHeader('Content-Type'); if (response.type === 'application/json') { try { response.body = JSON.parse(responseText); } catch (err) { err.message = err.message + ' while parsing `' + responseText + '`'; response.body = {}; response.status = xhr.status || 0; response.error = err; } } else { response.body = responseText; } return resolve({ request, response }); } }; try { if (request.body) { xhr.send(JSON.stringify(request.body)); } else { xhr.send(); } } catch (err) { response.body = {}; response.status = 0; response.error = err; return resolve({ request, response }); } }); } implore.get = function get(options) { options.method = 'GET'; return implore(options); }; implore.post = function post(options) { options.method = 'POST'; return implore(options); }; implore.put = function put(options) { options.method = 'PUT'; return implore(options); };<|fim▁hole|> return implore(options); }; /** * Combine the route/params/query of a request into a complete URL * * @param {request} request * @param {object|array} request.query * @return {string} url */ function getURLFromRequest(request) { const queryString = makeQueryString(request.query || {}); let formatted = request.route; let name; let value; let regexp; for (name in 
request.params) { if (request.params.hasOwnProperty(name)) { value = request.params[name]; regexp = new RegExp(':' + name + '(?=(\\\/|$))'); formatted = formatted.replace(regexp, value); } } return formatted + (queryString ? '?' + queryString : ''); } /** * Take a simple object and turn it into a queryString, recursively. * * @param {object} obj - query object * @param {string} prefix - used in recursive calls to keep track of the parent * @return {string} queryString without the '?'' */ function makeQueryString(obj, prefix='') { const str = []; let prop; let key; let value; for (prop in obj) { if (obj.hasOwnProperty(prop)) { key = prefix ? prefix + '[' + prop + ']' : prop; value = obj[prop]; str.push(typeof value === 'object' ? makeQueryString(value, key) : encodeURIComponent(key) + '=' + encodeURIComponent(value)); } } return str.join('&'); }<|fim▁end|>
implore.delete = function httpDelete(options) { options.method = 'DELETE';
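The makeQueryString helper above flattens nested objects into bracketed keys ({a: {b: 1}} becomes a[b]=1) before percent-encoding. The same idea as a short Python sketch, for illustration only:

from urllib.parse import quote

def make_query_string(obj, prefix=""):
    parts = []
    for prop, value in obj.items():
        key = "%s[%s]" % (prefix, prop) if prefix else str(prop)
        if isinstance(value, dict):
            parts.append(make_query_string(value, key))  # recurse under the parent key
        else:
            parts.append(quote(key, safe="") + "=" + quote(str(value), safe=""))
    return "&".join(parts)

assert make_query_string({"a": {"b": 1}, "c": 2}) == "a%5Bb%5D=1&c=2"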
<|file_name|>resource_aws_subnet.go<|end_file_name|><|fim▁begin|>package aws import ( "fmt" "log" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" ) func resourceAwsSubnet() *schema.Resource { return &schema.Resource{ Create: resourceAwsSubnetCreate, Read: resourceAwsSubnetRead, Update: resourceAwsSubnetUpdate, Delete: resourceAwsSubnetDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, }, Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(10 * time.Minute), Delete: schema.DefaultTimeout(10 * time.Minute), }, SchemaVersion: 1, MigrateState: resourceAwsSubnetMigrateState, Schema: map[string]*schema.Schema{ "vpc_id": { Type: schema.TypeString, Required: true, ForceNew: true, }, "cidr_block": { Type: schema.TypeString, Required: true, ForceNew: true, }, "ipv6_cidr_block": { Type: schema.TypeString, Optional: true, Computed: true, }, "availability_zone": { Type: schema.TypeString, Optional: true, Computed: true, ForceNew: true, ConflictsWith: []string{"availability_zone_id"}, }, "availability_zone_id": { Type: schema.TypeString, Optional: true, Computed: true, ForceNew: true, ConflictsWith: []string{"availability_zone"}, }, "map_public_ip_on_launch": { Type: schema.TypeBool, Optional: true, Default: false, }, "assign_ipv6_address_on_creation": { Type: schema.TypeBool, Optional: true, Default: false, }, "ipv6_cidr_block_association_id": { Type: schema.TypeString, Computed: true, }, "arn": { Type: schema.TypeString, Computed: true, }, "tags": tagsSchema(), "owner_id": { Type: schema.TypeString, Computed: true, }, }, } } func resourceAwsSubnetCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn createOpts := &ec2.CreateSubnetInput{ AvailabilityZone: aws.String(d.Get("availability_zone").(string)), AvailabilityZoneId: aws.String(d.Get("availability_zone_id").(string)), CidrBlock: aws.String(d.Get("cidr_block").(string)), VpcId: aws.String(d.Get("vpc_id").(string)), } if v, ok := d.GetOk("ipv6_cidr_block"); ok { createOpts.Ipv6CidrBlock = aws.String(v.(string)) } var err error resp, err := conn.CreateSubnet(createOpts) if err != nil { return fmt.Errorf("Error creating subnet: %s", err) } // Get the ID and store it subnet := resp.Subnet d.SetId(*subnet.SubnetId) log.Printf("[INFO] Subnet ID: %s", *subnet.SubnetId) // Wait for the Subnet to become available log.Printf("[DEBUG] Waiting for subnet (%s) to become available", *subnet.SubnetId) stateConf := &resource.StateChangeConf{ Pending: []string{"pending"}, Target: []string{"available"}, Refresh: SubnetStateRefreshFunc(conn, *subnet.SubnetId), Timeout: 10 * time.Minute, } _, err = stateConf.WaitForState() if err != nil { return fmt.Errorf( "Error waiting for subnet (%s) to become ready: %s", d.Id(), err) } return resourceAwsSubnetUpdate(d, meta) } func resourceAwsSubnetRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn resp, err := conn.DescribeSubnets(&ec2.DescribeSubnetsInput{ SubnetIds: []*string{aws.String(d.Id())}, }) if err != nil { if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidSubnetID.NotFound" { // Update state to indicate the subnet no longer exists. 
d.SetId("") return nil } return err } if resp == nil { return nil } subnet := resp.Subnets[0] d.Set("vpc_id", subnet.VpcId) d.Set("availability_zone", subnet.AvailabilityZone) d.Set("availability_zone_id", subnet.AvailabilityZoneId) d.Set("cidr_block", subnet.CidrBlock) d.Set("map_public_ip_on_launch", subnet.MapPublicIpOnLaunch) d.Set("assign_ipv6_address_on_creation", subnet.AssignIpv6AddressOnCreation) // Make sure those values are set, if an IPv6 block exists it'll be set in the loop d.Set("ipv6_cidr_block_association_id", "") d.Set("ipv6_cidr_block", "") for _, a := range subnet.Ipv6CidrBlockAssociationSet { if *a.Ipv6CidrBlockState.State == "associated" { //we can only ever have 1 IPv6 block associated at once d.Set("ipv6_cidr_block_association_id", a.AssociationId) d.Set("ipv6_cidr_block", a.Ipv6CidrBlock) break } } d.Set("arn", subnet.SubnetArn) d.Set("tags", tagsToMap(subnet.Tags)) d.Set("owner_id", subnet.OwnerId) return nil } func resourceAwsSubnetUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn d.Partial(true) if err := setTags(conn, d); err != nil { return err } else { d.SetPartial("tags") } if d.HasChange("map_public_ip_on_launch") { modifyOpts := &ec2.ModifySubnetAttributeInput{ SubnetId: aws.String(d.Id()), MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ Value: aws.Bool(d.Get("map_public_ip_on_launch").(bool)), }, } log.Printf("[DEBUG] Subnet modify attributes: %#v", modifyOpts) _, err := conn.ModifySubnetAttribute(modifyOpts) if err != nil { return err } else { d.SetPartial("map_public_ip_on_launch") } } // We have to be careful here to not go through a change of association if this is a new resource // A New resource here would denote that the Update func is called by the Create func if d.HasChange("ipv6_cidr_block") && !d.IsNewResource() { // We need to handle that we disassociate the IPv6 CIDR block before we try and associate the new one // This could be an issue as, we could error out when we try and add the new one // We may need to roll back the state and reattach the old one if this is the case _, new := d.GetChange("ipv6_cidr_block") if v, ok := d.GetOk("ipv6_cidr_block_association_id"); ok { //Firstly we have to disassociate the old IPv6 CIDR Block disassociateOps := &ec2.DisassociateSubnetCidrBlockInput{ AssociationId: aws.String(v.(string)), } _, err := conn.DisassociateSubnetCidrBlock(disassociateOps) if err != nil { return err } // Wait for the CIDR to become disassociated log.Printf( "[DEBUG] Waiting for IPv6 CIDR (%s) to become disassociated", d.Id()) stateConf := &resource.StateChangeConf{ Pending: []string{"disassociating", "associated"}, Target: []string{"disassociated"}, Refresh: SubnetIpv6CidrStateRefreshFunc(conn, d.Id(), d.Get("ipv6_cidr_block_association_id").(string)), Timeout: 3 * time.Minute, }<|fim▁hole|> if _, err := stateConf.WaitForState(); err != nil { return fmt.Errorf( "Error waiting for IPv6 CIDR (%s) to become disassociated: %s", d.Id(), err) } } //Now we need to try and associate the new CIDR block associatesOpts := &ec2.AssociateSubnetCidrBlockInput{ SubnetId: aws.String(d.Id()), Ipv6CidrBlock: aws.String(new.(string)), } resp, err := conn.AssociateSubnetCidrBlock(associatesOpts) if err != nil { //The big question here is, do we want to try and reassociate the old one?? 
//If we have a failure here, then we may be in a situation that we have nothing associated return err } // Wait for the CIDR to become associated log.Printf( "[DEBUG] Waiting for IPv6 CIDR (%s) to become associated", d.Id()) stateConf := &resource.StateChangeConf{ Pending: []string{"associating", "disassociated"}, Target: []string{"associated"}, Refresh: SubnetIpv6CidrStateRefreshFunc(conn, d.Id(), *resp.Ipv6CidrBlockAssociation.AssociationId), Timeout: 3 * time.Minute, } if _, err := stateConf.WaitForState(); err != nil { return fmt.Errorf( "Error waiting for IPv6 CIDR (%s) to become associated: %s", d.Id(), err) } d.SetPartial("ipv6_cidr_block") } if d.HasChange("assign_ipv6_address_on_creation") { modifyOpts := &ec2.ModifySubnetAttributeInput{ SubnetId: aws.String(d.Id()), AssignIpv6AddressOnCreation: &ec2.AttributeBooleanValue{ Value: aws.Bool(d.Get("assign_ipv6_address_on_creation").(bool)), }, } log.Printf("[DEBUG] Subnet modify attributes: %#v", modifyOpts) _, err := conn.ModifySubnetAttribute(modifyOpts) if err != nil { return err } else { d.SetPartial("assign_ipv6_address_on_creation") } } d.Partial(false) return resourceAwsSubnetRead(d, meta) } func resourceAwsSubnetDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn log.Printf("[INFO] Deleting subnet: %s", d.Id()) if err := deleteLingeringLambdaENIs(conn, d, "subnet-id"); err != nil { return fmt.Errorf("Failed to delete Lambda ENIs: %s", err) } req := &ec2.DeleteSubnetInput{ SubnetId: aws.String(d.Id()), } wait := resource.StateChangeConf{ Pending: []string{"pending"}, Target: []string{"destroyed"}, Timeout: 10 * time.Minute, MinTimeout: 1 * time.Second, Refresh: func() (interface{}, string, error) { _, err := conn.DeleteSubnet(req) if err != nil { if apiErr, ok := err.(awserr.Error); ok { if apiErr.Code() == "DependencyViolation" { // There is some pending operation, so just retry // in a bit. return 42, "pending", nil } if apiErr.Code() == "InvalidSubnetID.NotFound" { return 42, "destroyed", nil } } return 42, "failure", err } return 42, "destroyed", nil }, } if _, err := wait.WaitForState(); err != nil { return fmt.Errorf("Error deleting subnet: %s", err) } return nil } // SubnetStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch a Subnet. func SubnetStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { return func() (interface{}, string, error) { resp, err := conn.DescribeSubnets(&ec2.DescribeSubnetsInput{ SubnetIds: []*string{aws.String(id)}, }) if err != nil { if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidSubnetID.NotFound" { resp = nil } else { log.Printf("Error on SubnetStateRefresh: %s", err) return nil, "", err } } if resp == nil { // Sometimes AWS just has consistency issues and doesn't see // our instance yet. Return an empty state. return nil, "", nil } subnet := resp.Subnets[0] return subnet, *subnet.State, nil } } func SubnetIpv6CidrStateRefreshFunc(conn *ec2.EC2, id string, associationId string) resource.StateRefreshFunc { return func() (interface{}, string, error) { opts := &ec2.DescribeSubnetsInput{ SubnetIds: []*string{aws.String(id)}, } resp, err := conn.DescribeSubnets(opts) if err != nil { if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidSubnetID.NotFound" { resp = nil } else { log.Printf("Error on SubnetIpv6CidrStateRefreshFunc: %s", err) return nil, "", err } } if resp == nil { // Sometimes AWS just has consistency issues and doesn't see // our instance yet. Return an empty state. 
return nil, "", nil } if resp.Subnets[0].Ipv6CidrBlockAssociationSet == nil { return nil, "", nil } for _, association := range resp.Subnets[0].Ipv6CidrBlockAssociationSet { if *association.AssociationId == associationId { return association, *association.Ipv6CidrBlockState.State, nil } } return nil, "", nil } }<|fim▁end|>
<|file_name|>location.py<|end_file_name|><|fim▁begin|>from geopy.point import Point class Location(object): def __init__(self, name="", point=None, attributes=None, **kwargs): self.name = name if point is not None: self.point = Point(point)<|fim▁hole|> def __getitem__(self, index): """Backwards compatibility with geopy 0.93 tuples.""" return (self.name, self.point)[index] def __repr__(self): return "Location(%r, %r)" % (self.name, self.point) def __iter__(self): return iter((self.name, self.point)) def __eq__(self, other): return (self.name, self.point) == (other.name, other.point) def __ne__(self, other): return (self.name, self.point) != (other.name, other.point)<|fim▁end|>
if attributes is None: attributes = {} self.attributes = dict(attributes, **kwargs)
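A quick usage sketch of the geopy 0.93-style tuple compatibility the class above preserves (the coordinates are illustrative):

loc = Location("Mount Everest", (27.988, 86.925))
name, point = loc                 # __iter__ keeps (name, point) unpacking
assert loc[0] == "Mount Everest"  # __getitem__ keeps 0.93-style indexing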
<|file_name|>printf_parse.py<|end_file_name|><|fim▁begin|># ported from gnulib rev be7d73709d2b3bceb987f1be00a049bb7021bf87 # # Copyright (C) 2014, Mark Laws. # Copyright (C) 1999, 2002-2003, 2005-2007, 2009-2014 Free Software # Foundation, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License along # with this program; if not, see <http://www.gnu.org/licenses/>. import ctypes from flufl.enum import Enum sizeof = ctypes.sizeof Arg_type = Enum('Arg_type', [str(x.strip()) for x in ''' TYPE_NONE TYPE_SCHAR TYPE_UCHAR TYPE_SHORT TYPE_USHORT TYPE_INT TYPE_UINT TYPE_LONGINT TYPE_ULONGINT TYPE_LONGLONGINT TYPE_ULONGLONGINT TYPE_DOUBLE TYPE_LONGDOUBLE TYPE_CHAR TYPE_WIDE_CHAR TYPE_STRING TYPE_WIDE_STRING TYPE_POINTER TYPE_COUNT_SCHAR_POINTER TYPE_COUNT_SHORT_POINTER TYPE_COUNT_INT_POINTER TYPE_COUNT_LONGINT_POINTER TYPE_COUNT_LONGLONGINT_POINTER '''.splitlines() if x != '']) FLAG_GROUP = 1 # ' flag FLAG_LEFT = 2 # - flag FLAG_SHOWSIGN = 4 # + flag FLAG_SPACE = 8 # space flag FLAG_ALT = 16 # # flag FLAG_ZERO = 32 # arg_index value indicating that no argument is consumed. ARG_NONE = ~0 class Argument(object): __slots__ = ['type', 'data'] class Arguments(object): __slots__ = ['count', 'arg'] def __init__(self): self.count = 0 self.arg = [] class Directive(object): '''A parsed directive.''' __slots__ = ['dir_start', 'dir_end', 'flags', 'width_start', 'width_end', 'width_arg_index', 'precision_start', 'precision_end', 'precision_arg_index', 'conversion', 'arg_index'] # conversion: d i o u x X f F e E g G a A c s p n U % but not C S def __init__(self): self.flags = 0 self.width_start = None self.width_end = None self.width_arg_index = ARG_NONE self.precision_start = None self.precision_end = None self.precision_arg_index = ARG_NONE self.arg_index = ARG_NONE class Directives(object): '''A parsed format string.''' __slots__ = ['count', 'dir', 'max_width_length', 'max_precision_length'] def __init__(self): self.count = 0 self.dir = [] def REGISTER_ARG(a, index, type): n = index while a.count <= n: try: a.arg[a.count] except IndexError: a.arg.append(Argument()) a.arg[a.count].type = Arg_type.TYPE_NONE a.count += 1 if a.arg[n].type == Arg_type.TYPE_NONE: a.arg[n].type = type elif a.arg[n].type != type: raise ValueError('ambiguous type for positional argument') def conv_signed(c, flags): # If 'long long' exists and is larger than 'long': if flags >= 16 or flags & 4: return c, Arg_type.TYPE_LONGLONGINT else: # If 'long long' exists and is the same as 'long', we parse "lld" into # TYPE_LONGINT. if flags >= 8: type = Arg_type.TYPE_LONGINT elif flags & 2: type = Arg_type.TYPE_SCHAR elif flags & 1: type = Arg_type.TYPE_SHORT else: type = Arg_type.TYPE_INT return c, type def conv_unsigned(c, flags): # If 'long long' exists and is larger than 'long': if flags >= 16 or flags & 4: return c, Arg_type.TYPE_ULONGLONGINT else: # If 'unsigned long long' exists and is the same as 'unsigned long', we # parse "llu" into TYPE_ULONGINT. 
if flags >= 8: type = Arg_type.TYPE_ULONGINT elif flags & 2: type = Arg_type.TYPE_UCHAR elif flags & 1: type = Arg_type.TYPE_USHORT else: type = Arg_type.TYPE_UINT return c, type def conv_float(c, flags): if flags >= 16 or flags & 4: return c, Arg_type.TYPE_LONGDOUBLE else: return c, Arg_type.TYPE_DOUBLE def conv_char(c, flags): if flags >= 8: return c, Arg_type.TYPE_WIDE_CHAR else: return c, Arg_type.TYPE_CHAR def conv_widechar(c, flags): c = 'c' return c, Arg_type.TYPE_WIDE_CHAR def conv_string(c, flags): if flags >= 8: return c, Arg_type.TYPE_WIDE_STRING else: return c, Arg_type.TYPE_STRING def conv_widestring(c, flags): c = 's' return c, Arg_type.TYPE_WIDE_STRING def conv_pointer(c, flags): return c, Arg_type.TYPE_POINTER def conv_intpointer(c, flags): # If 'long long' exists and is larger than 'long': if flags >= 16 or flags & 4: return c, Arg_type.TYPE_COUNT_LONGLONGINT_POINTER else: # If 'long long' exists and is the same as 'long', we parse "lln" into # TYPE_COUNT_LONGINT_POINTER. if flags >= 8: type = Arg_type.TYPE_COUNT_LONGINT_POINTER elif flags & 2: type = Arg_type.TYPE_COUNT_SCHAR_POINTER elif flags & 1: type = Arg_type.TYPE_COUNT_SHORT_POINTER else: type = Arg_type.TYPE_COUNT_INT_POINTER return c, type def conv_none(c, flags): return c, Arg_type.TYPE_NONE _conv_char = { 'd': conv_signed, 'i': conv_signed, 'o': conv_unsigned, 'u': conv_unsigned, 'x': conv_unsigned, 'X': conv_unsigned, 'f': conv_float, 'F': conv_float, 'e': conv_float, 'E': conv_float, 'g': conv_float, 'G': conv_float, 'a': conv_float, 'A': conv_float, 'c': conv_char, 'C': conv_widechar, 's': conv_string, 'S': conv_widestring, 'p': conv_pointer, 'n': conv_intpointer, '%': conv_none } def printf_parse(fmt): '''Parses the format string. Fills in the number N of directives, and fills in directives[0], ..., directives[N-1], and sets directives[N].dir_start to the end of the format string. Also fills in the arg_type fields of the arguments and the needed count of arguments.''' cp = 0 # index into format string arg_posn = 0 # number of regular arguments consumed max_width_length = 0 max_precision_length = 0 d = Directives() a = Arguments() while True: try: c = fmt[cp] except IndexError: break cp += 1 if c == '%': arg_index = ARG_NONE d.dir.append(Directive()) dp = d.dir[d.count] dp.dir_start = cp - 1 # Test for positional argument. if fmt[cp].isdigit(): np = cp while fmt[np].isdigit(): np += 1 if fmt[np] == '$': n = 0 np = cp while fmt[np].isdigit(): n = n * 10 + (ord(fmt[np]) - ord('0')) np += 1 if n == 0: raise ValueError('positional argument 0') arg_index = n - 1 cp = np + 1 # Read the flags. while True: if fmt[cp] == '\'': dp.flags |= FLAG_GROUP cp += 1 elif fmt[cp] == '-': dp.flags |= FLAG_LEFT cp += 1 elif fmt[cp] == '+': dp.flags |= FLAG_SHOWSIGN cp += 1 elif fmt[cp] == ' ': dp.flags |= FLAG_SPACE cp += 1 elif fmt[cp] == '#': dp.flags |= FLAG_ALT cp += 1 elif fmt[cp] == '0': dp.flags |= FLAG_ZERO cp += 1 else: break # Parse the field width. if fmt[cp] == '*': dp.width_start = cp cp += 1 dp.width_end = cp if max_width_length < 1: max_width_length = 1 # Test for positional argument. 
if fmt[cp].isdigit(): np = cp while fmt[np].isdigit(): np += 1 if fmt[np] == '$': n = 0 np = cp while fmt[np].isdigit(): n = n * 10 + (ord(fmt[np]) - ord('0')) np += 1 if n == 0: raise ValueError('positional argument 0') dp.width_arg_index = n - 1 cp = np + 1 if dp.width_arg_index == ARG_NONE: dp.width_arg_index = arg_posn arg_posn += 1 REGISTER_ARG(a, dp.width_arg_index, Arg_type.TYPE_INT) elif fmt[cp].isdigit(): dp.width_start = cp while fmt[cp].isdigit(): cp += 1 dp.width_end = cp width_length = dp.width_end - dp.width_start if max_width_length < width_length: max_width_length = width_length # Parse the precision. if fmt[cp] == '.': cp += 1 if fmt[cp] == '*': dp.precision_start = cp - 1 cp += 1 dp.precision_end = cp<|fim▁hole|> if max_precision_length < 2: max_precision_length = 2 # Test for positional argument. if fmt[cp].isdigit(): np = cp while fmt[np].isdigit(): np += 1 if fmt[np] == '$': n = 0 np = cp while fmt[np].isdigit(): n = n * 10 + (ord(fmt[np]) - ord('0')) np += 1 if n == 0: raise ValueError('positional argument 0') dp.precision_arg_index = n - 1 cp = np + 1 if dp.precision_arg_index == ARG_NONE: dp.precision_arg_index = arg_posn arg_posn += 1 REGISTER_ARG(a, dp.precision_arg_index, Arg_type.TYPE_INT) else: dp.precision_start = cp - 1 while fmt[cp].isdigit(): cp += 1 dp.precision_end = cp precision_length = dp.precision_end - dp.precision_start if max_precision_length < precision_length: max_precision_length = precision_length # Parse argument type/size specifiers. flags = 0 while True: if fmt[cp] == 'h': flags |= (1 << (flags & 1)) cp += 1 elif fmt[cp] == 'L': flags |= 4 cp += 1 elif fmt[cp] == 'l': flags += 8 cp += 1 elif fmt[cp] == 'j': raise ValueError("don't know how to handle intmax_t") elif fmt[cp] == 'z': if sizeof(ctypes.c_size_t) > sizeof(ctypes.c_long): # size_t = long long flags += 16 elif sizeof(ctypes.c_size_t) > sizeof(ctypes.c_int): # size_t = long flags += 8 cp += 1 elif fmt[cp] == 't': raise ValueError("don't know how to handle ptrdiff_t") else: break # Read the conversion character. c = fmt[cp] cp += 1 try: c, type = _conv_char[c](c, flags) except KeyError: raise ValueError('bad conversion character: %%%s' % c) if type != Arg_type.TYPE_NONE: dp.arg_index = arg_index if dp.arg_index == ARG_NONE: dp.arg_index = arg_posn arg_posn += 1 REGISTER_ARG(a, dp.arg_index, type) dp.conversion = c dp.dir_end = cp d.count += 1 d.dir.append(Directive()) d.dir[d.count].dir_start = cp d.max_width_length = max_width_length d.max_precision_length = max_precision_length return d, a<|fim▁end|>
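A quick way to sanity-check the port above is to parse a small format string and inspect the returned Directives and Arguments. This is only an illustrative sketch: it assumes the module above is importable as printf_parse and that flufl.enum is available.

# Smoke test for the printf_parse port above (module name assumed).
from printf_parse import printf_parse, Arg_type, FLAG_ZERO

fmt = '%05d scored %s'
directives, args = printf_parse(fmt)

# Two directives were parsed; the sentinel entry after the last real
# directive records the end of the format string.
assert directives.count == 2
assert directives.dir[directives.count].dir_start == len(fmt)

d0, d1 = directives.dir[0], directives.dir[1]
assert d0.conversion == 'd' and d0.flags == FLAG_ZERO
assert (d0.width_start, d0.width_end) == (2, 3)  # the '5' in '%05d'
assert d1.conversion == 's'

# Argument types are registered in the order the arguments are consumed.
assert args.count == 2
assert args.arg[0].type == Arg_type.TYPE_INT
assert args.arg[1].type == Arg_type.TYPE_STRING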
<|file_name|>alpineobject.py<|end_file_name|><|fim▁begin|>from __future__ import unicode_literals from __future__ import absolute_import import json import logging import logging.config import os class AlpineObject(object): """ Base class of Alpine API objects """ # # Alpine API version string # _alpine_api_version = "v1" _min_alpine_version = "6.2" def __init__(self, base_url=None, session=None, token=None): self.base_url = base_url self.session = session self.token = token self._setup_logging() # Get the logger from the configuration file (logging.json) if it exists; # see logging.json for details. self.logger = logging.getLogger("debug") def _add_token_to_url(self, url): """ Used internally to properly form URLs. :param str url: An Alpine API URL :return: Formatted URL :rtype: str """ return str("{0}?session_id={1}".format(url, self.token)) @staticmethod<|fim▁hole|> env_key='LOG_CFG'): """ Sets internal values for logging through a file or an environment variable. :param str default_configuration_setting_file: Path to logging config file. Will be overridden by the environment variable if it exists. :param default_level: See possible levels here: https://docs.python.org/2/library/logging.html#logging-levels :param str env_key: Name of environment variable with logging setting. :return: None """ path = default_configuration_setting_file value = os.getenv(env_key, None) if value: path = value if os.path.exists(path): with open(path, 'rt') as f: config = json.load(f) logging.config.dictConfig(config) else: logging.basicConfig(level=default_level, format="%(asctime)s %(name)s %(module)s[%(lineno)d] %(levelname)s: %(message)s")<|fim▁end|>
def _setup_logging(default_configuration_setting_file='logging.json', default_level=logging.INFO,
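Taken together, the constructor and _add_token_to_url() above are enough to build authenticated endpoint URLs. A small illustration follows; the module name, base URL and token are invented for the example.

# Hypothetical usage of the AlpineObject base class above.
from alpineobject import AlpineObject

obj = AlpineObject(base_url='https://alpine.example.com/api',
                   session=None, token='abc123')

# The stored token is appended as a session_id query parameter.
url = obj._add_token_to_url('{0}/users'.format(obj.base_url))
assert url == 'https://alpine.example.com/api/users?session_id=abc123'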
<|file_name|>cwr.py<|end_file_name|><|fim▁begin|># -*- encoding: utf-8 -*- import StringIO import xlsxwriter """ Web app module. """ __author__ = 'Bernardo Martínez Garrido' __license__ = 'MIT' __status__ = 'Development' def generate_cwr_report_excel(cwr): output = StringIO.StringIO() workbook = xlsxwriter.Workbook(output, {'in_memory': True}) _generate_cwr_report_excel_general(workbook, cwr) for group in cwr.transmission.groups: _generate_cwr_report_excel_group(workbook, group) workbook.close() output.seek(0) return output.read() def _generate_cwr_report_excel_group(workbook, group):<|fim▁hole|> bold = workbook.add_format({'bold': 1}) row = 1 col = 0 for transaction in group.transactions: for record in transaction: results_sheet.write(row, col + 1, record.record_type) row += 1 def _generate_cwr_report_excel_general(workbook, cwr): results_sheet = workbook.add_worksheet('General info') bold = workbook.add_format({'bold': 1}) header = cwr.transmission.header trailer = cwr.transmission.trailer row = 1 col = 0 results_sheet.write(row, col, 'Sender ID', bold) results_sheet.write(row, col + 1, header.sender_id) row += 1 results_sheet.write(row, col, 'Sender Name', bold) results_sheet.write(row, col + 1, header.sender_name) row += 1 results_sheet.write(row, col, 'Sender Type', bold) results_sheet.write(row, col + 1, header.sender_name) row += 1 row += 1 results_sheet.write(row, col, 'Creation Date', bold) results_sheet.write(row, col + 1, header.creation_date_time) row += 1 results_sheet.write(row, col, 'Transmission Date', bold) results_sheet.write(row, col + 1, header.transmission_date) row += 1 row += 1 results_sheet.write(row, col, 'EDI Standard', bold) results_sheet.write(row, col + 1, header.edi_standard) row += 1 results_sheet.write(row, col, 'Character Set', bold) results_sheet.write(row, col + 1, header.character_set) row += 1 row += 1 results_sheet.write(row, col, 'Counts', bold) row += 1 results_sheet.write(row, col, 'Groups', bold) results_sheet.write(row, col + 1, trailer.group_count) row += 1 results_sheet.write(row, col, 'Transactions', bold) results_sheet.write(row, col + 1, trailer.transaction_count) row += 1 results_sheet.write(row, col, 'Records', bold) results_sheet.write(row, col + 1, trailer.record_count)<|fim▁end|>
results_sheet = workbook.add_worksheet(group.group_header.transaction_type)
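The in-memory workbook pattern used by generate_cwr_report_excel() is worth noting on its own: nothing touches the filesystem, so the resulting bytes can be returned straight from a web handler. A minimal stand-alone sketch, using Python 3's io.BytesIO where the module above uses the Python 2 StringIO:

import io
import xlsxwriter

output = io.BytesIO()
workbook = xlsxwriter.Workbook(output, {'in_memory': True})

sheet = workbook.add_worksheet('General info')
bold = workbook.add_format({'bold': 1})
sheet.write(0, 0, 'Sender ID', bold)  # label column, as in the report above
sheet.write(0, 1, 'ABC123')           # value column

workbook.close()         # finalizes the .xlsx structure into the buffer
output.seek(0)
payload = output.read()  # bytes of a complete .xlsx file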
<|file_name|>tax.js<|end_file_name|><|fim▁begin|>/** * Shopware 5 * Copyright (c) shopware AG * * According to our dual licensing model, this program can be used either * under the terms of the GNU Affero General Public License, version 3, * or under a proprietary license. * * The texts of the GNU Affero General Public License with an additional * permission and of our proprietary license can be found at and * in the LICENSE file you have received along with this program. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * "Shopware" is a registered trademark of shopware AG. * The licensing of the program under the AGPLv3 does not imply a * trademark license. Therefore any rights, title and interest in * our trademarks remain entirely with us. * * @category Shopware * @package Base * @subpackage Model * @version $Id$ * @author shopware AG */ /** * Shopware Model - Global Stores and Models * * The tax model represents a data row of the s_core_tax table or the * Shopware\Models\Article\Tax doctrine model. */<|fim▁hole|>
<|file_name|>Avatar.tsx<|end_file_name|><|fim▁begin|>import * as React from 'react'; import { addPropertyControls, ControlType } from 'framer'; // tslint:disable-next-line: ban-ts-ignore // @ts-ignore import MuiAvatar from '@material-ui/core/Avatar'; import { Icon } from './Icon'; interface Props { variant?: 'circle' | 'circular' | 'rounded' | 'square'; backgroundColor?: string; textColor?: string; icon?: string; imageFile?: string; imageUrl?: string; label?: string; width?: number; height?: number; } const defaultProps: Props = { variant: 'circle', backgroundColor: '#4154af', textColor: undefined, icon: 'face', imageFile: '', imageUrl: 'https://i.pravatar.cc/300', label: 'MB', width: 40, height: 40, }; export const Avatar: React.SFC<Props> = (props: Props) => { const { backgroundColor, height, icon, imageFile, imageUrl, label, textColor, width, ...other } = props; return imageFile || imageUrl ? ( <MuiAvatar src={imageFile || imageUrl} style={{ height, width }} {...other} /> ) : ( <MuiAvatar style={{ color: textColor, backgroundColor, height, width }} {...other}> {icon === '' ? label : <Icon icon={icon} />} </MuiAvatar> ); }; Avatar.defaultProps = defaultProps; addPropertyControls(Avatar, { variant: { type: ControlType.Enum, title: 'Variant', options: ['circle', 'circular', 'rounded', 'square'], }, backgroundColor: { type: ControlType.Color, title: 'Background color', }, textColor: { type: ControlType.Color, title: 'Text color', }, icon: { type: ControlType.String, title: 'Icon', }, imageFile: { type: ControlType.Image,<|fim▁hole|> return props.primaryAction && props.primaryAction !== 'avatar'; }, }, imageUrl: { type: ControlType.String, title: 'Image URL', hidden: function hidden(props) { return props.imageFile !== '' || (props.primaryAction && props.primaryAction !== 'avatar'); }, }, label: { type: ControlType.String, title: 'Label', }, });<|fim▁end|>
title: 'Image File', hidden: function hidden(props) {
<|file_name|>VectorE2.d.ts<|end_file_name|><|fim▁begin|>/** * @hidden<|fim▁hole|>export interface VectorE2 { /** * The Cartesian x-coordinate or <em>abscissa</em>. */ x: number; /** * The Cartesian y-coordinate or <em>ordinate</em>. */ y: number; }<|fim▁end|>
*/
<|file_name|>FlatSliderSimple2d.cpp<|end_file_name|><|fim▁begin|>/* ****************************************************************** ** ** OpenSees - Open System for Earthquake Engineering Simulation ** ** Pacific Earthquake Engineering Research Center ** ** ** ** ** ** (C) Copyright 1999, The Regents of the University of California ** ** All Rights Reserved. ** ** ** ** Commercial use of this program without express permission of the ** ** University of California, Berkeley, is strictly prohibited. See ** ** file 'COPYRIGHT' in main directory for information on usage and ** ** redistribution, and for a DISCLAIMER OF ALL WARRANTIES. ** ** ** ** Developed by: ** ** Frank McKenna ([email protected]) ** ** Gregory L. Fenves ([email protected]) ** ** Filip C. Filippou ([email protected]) ** ** ** ** ****************************************************************** */ // $Revision: 1.2 $ // $Date: 2009/11/03 23:12:33 $ // $Source: /usr/local/cvs/OpenSees/SRC/element/special/frictionBearing/FlatSliderSimple2d.cpp,v $ // Written: Andreas Schellenberg ([email protected]) // Created: 02/06 // Revision: A // // Description: This file contains the implementation of the // FlatSliderSimple2d class. #include "FlatSliderSimple2d.h" #include "domain/domain/Domain.h" #include "domain/mesh/node/Node.h" #include "utility/actor/objectBroker/FEM_ObjectBroker.h" #include "utility/recorder/response/ElementResponse.h" #include "frictionModel/FrictionModel.h" #include "material/uniaxial/UniaxialMaterial.h" #include <cfloat> #include <cmath> #include <cstdlib> #include <cstring> // initialize the class wide variables XC::Matrix XC::FlatSliderSimple2d::theMatrix(6,6); XC::Vector XC::FlatSliderSimple2d::theVector(6); XC::FlatSliderSimple2d::FlatSliderSimple2d(int tag, int Nd1, int Nd2, FrictionModel &thefrnmdl, double _uy,const std::vector<UniaxialMaterial *> &materials, const Vector _y, const Vector _x, double m, int maxiter, double _tol) : FrictionElementBase(tag, ELE_TAG_FlatSliderSimple2d,Nd1,Nd2,3,thefrnmdl,UniaxialMatPhysicalProperties(materials),_uy,_x,_y,m,maxiter,tol), ubPlastic(0.0), ubPlasticC(0.0) { load.reset(6); assert(materials.size()==2); // initialize initial stiffness matrix kbInit.Zero(); kbInit(0,0) = physicalProperties[0]->getInitialTangent(); kbInit(1,1) = kbInit(0,0)*DBL_EPSILON; kbInit(2,2) = physicalProperties[1]->getInitialTangent(); // initialize other variables revertToStart(); } XC::FlatSliderSimple2d::FlatSliderSimple2d() : FrictionElementBase(ELE_TAG_FlatSliderSimple2d,3), ubPlastic(0.0), ubPlasticC(0.0) {load.reset(6);} int XC::FlatSliderSimple2d::getNumDOF() { return 6; } void XC::FlatSliderSimple2d::setDomain(Domain *theDomain) { FrictionElementBase::setDomain(theDomain); // now determine the number of dof and the dimension const int dofNd1 = theNodes[0]->getNumberDOF(); const int dofNd2 = theNodes[1]->getNumberDOF(); // if differing dof at the ends - print a warning message if(dofNd1 != 3) { std::cerr << "FlatSliderSimple2d::setDomain() - node 1: " << " has incorrect number of DOF (not 3)\n";<|fim▁hole|> if(dofNd2 != 3) { std::cerr << "FlatSliderSimple2d::setDomain() - node 2: " << " has incorrect number of DOF (not 3)\n"; return; } // set up the transformation matrix for orientation this->setUp(); } int XC::FlatSliderSimple2d::commitState() { int errCode = 0; ubPlasticC = ubPlastic;// commit trial history variables errCode += theFrnMdl->commitState();// commit friction model errCode += physicalProperties.commitState();// commit material models return errCode; } int 
XC::FlatSliderSimple2d::revertToLastCommit() { int errCode = 0; errCode += theFrnMdl->revertToLastCommit();// revert friction model errCode += physicalProperties.revertToLastCommit();// revert material models return errCode; } int XC::FlatSliderSimple2d::revertToStart() { int errCode = 0; // reset trial history variables ub.Zero(); ubPlastic = 0.0; qb.Zero(); // reset committed history variables ubPlasticC = 0.0; // reset stiffness matrix in basic system kb = kbInit; // revert friction model errCode += theFrnMdl->revertToStart(); errCode += physicalProperties.revertToStart();// revert material models return errCode; } int XC::FlatSliderSimple2d::update() { // get global trial displacements and velocities const Vector &dsp1 = theNodes[0]->getTrialDisp(); const Vector &dsp2 = theNodes[1]->getTrialDisp(); const Vector &vel1 = theNodes[0]->getTrialVel(); const Vector &vel2 = theNodes[1]->getTrialVel(); static Vector ug(6), ugdot(6), uldot(6), ubdot(3); for (int i=0; i<3; i++) { ug(i) = dsp1(i); ugdot(i) = vel1(i); ug(i+3) = dsp2(i); ugdot(i+3) = vel2(i); } // transform response from the global to the local system ul = Tgl*ug; uldot = Tgl*ugdot; // transform response from the local to the basic system ub = Tlb*ul; ubdot = Tlb*uldot; // get absolute velocity double ubdotAbs = ubdot(1); // 1) get axial force and stiffness in basic x-direction double ub0Old = physicalProperties[0]->getStrain(); physicalProperties[0]->setTrialStrain(ub(0),ubdot(0)); qb(0) = physicalProperties[0]->getStress(); kb(0,0) = physicalProperties[0]->getTangent(); // check for uplift if (qb(0) >= 0.0) { kb = kbInit; if (qb(0) > 0.0) { physicalProperties[0]->setTrialStrain(ub0Old,0.0); kb(0,0) *= DBL_EPSILON; } qb.Zero(); return 0; } // 2) calculate shear force and stiffness in basic y-direction int iter = 0; double qb1Old = 0.0; do { // save old shear force qb1Old = qb(1); // get normal and friction (yield) forces double N = -qb(0) - qb(1)*ul(2); theFrnMdl->setTrial(N, ubdotAbs); double qYield = (theFrnMdl->getFrictionForce()); // get initial stiffness of hysteretic component double k0 = qYield/uy; // get trial shear force of hysteretic component double qTrial = k0*(ub(1) - ubPlasticC); // compute yield criterion of hysteretic component double qTrialNorm = fabs(qTrial); double Y = qTrialNorm - qYield; // elastic step -> no updates required if (Y <= 0.0) { // set shear force qb(1) = qTrial - N*ul(2); // set tangent stiffness kb(1,1) = k0; } // plastic step -> return mapping else { // compute consistency parameter double dGamma = Y/k0; // update plastic displacement ubPlastic = ubPlasticC + dGamma*qTrial/qTrialNorm; // set shear force qb(1) = qYield*qTrial/qTrialNorm - N*ul(2); // set tangent stiffness kb(1,1) = 0.0; } iter++; } while ((fabs(qb(1)-qb1Old) >= tol) && (iter <= maxIter)); // issue warning if iteration did not converge if (iter >= maxIter) { std::cerr << "WARNING: XC::FlatSliderSimple2d::update() - did not find the shear force after " << iter << " iterations and norm: " << fabs(qb(1)-qb1Old) << std::endl; return -1; } // 3) get moment and stiffness in basic z-direction physicalProperties[1]->setTrialStrain(ub(2),ubdot(2)); qb(2) = physicalProperties[1]->getStress(); kb(2,2) = physicalProperties[1]->getTangent(); return 0; } const XC::Matrix &XC::FlatSliderSimple2d::getTangentStiff() { // zero the matrix theMatrix.Zero(); // transform from basic to local system static Matrix kl(6,6); kl.addMatrixTripleProduct(0.0, Tlb, kb, 1.0); // add geometric stiffness to local stiffness kl(2,1) -= 1.0*qb(0); kl(2,4) += 
1.0*qb(0); //kl(5,1) -= 0.0*qb(0); //kl(5,4) += 0.0*qb(0); // transform from local to global system theMatrix.addMatrixTripleProduct(0.0, Tgl, kl, 1.0); return theMatrix; } const XC::Matrix &XC::FlatSliderSimple2d::getInitialStiff() { // zero the matrix theMatrix.Zero(); // transform from basic to local system static Matrix kl(6,6); kl.addMatrixTripleProduct(0.0, Tlb, kbInit, 1.0); // transform from local to global system theMatrix.addMatrixTripleProduct(0.0, Tgl, kl, 1.0); return theMatrix; } const XC::Matrix &XC::FlatSliderSimple2d::getMass() { // zero the matrix theMatrix.Zero(); // check for quick return if (mass == 0.0) { return theMatrix; } double m = 0.5*mass; for (int i=0; i<2; i++) { theMatrix(i,i) = m; theMatrix(i+3,i+3) = m; } return theMatrix; } int XC::FlatSliderSimple2d::addLoad(ElementalLoad *theLoad, double loadFactor) { std::cerr <<"XC::FlatSliderSimple2d::addLoad() - " << "load type unknown for element: " << this->getTag() << std::endl; return -1; } int XC::FlatSliderSimple2d::addInertiaLoadToUnbalance(const Vector &accel) { // check for quick return if (mass == 0.0) { return 0; } // get R * accel from the nodes const Vector &Raccel1 = theNodes[0]->getRV(accel); const Vector &Raccel2 = theNodes[1]->getRV(accel); if (3 != Raccel1.Size() || 3 != Raccel2.Size()) { std::cerr << "XC::FlatSliderSimple2d::addInertiaLoadToUnbalance() - " << "matrix and vector sizes are incompatible\n"; return -1; } // want to add ( - fact * M R * accel ) to unbalance // take advantage of lumped mass matrix double m = 0.5*mass; for (int i=0; i<2; i++) { load(i) -= m * Raccel1(i); load(i+3) -= m * Raccel2(i); } return 0; } const XC::Vector& XC::FlatSliderSimple2d::getResistingForce() { // zero the residual theVector.Zero(); // determine resisting forces in local system static Vector ql(6); ql = Tlb^qb; // add P-Delta moments to local forces double MpDelta = qb(0)*(ul(4)-ul(1)); ql(2) += 1.0*MpDelta; //ql(5) += 0.0*MpDelta; // determine resisting forces in global system theVector = Tgl^ql; // subtract external load theVector.addVector(1.0, load, -1.0); return theVector; } const XC::Vector& XC::FlatSliderSimple2d::getResistingForceIncInertia() { theVector = this->getResistingForce(); // add the damping forces if rayleigh damping if(!rayFactors.nullValues()) theVector += this->getRayleighDampingForces(); // now include the mass portion if(mass != 0.0) { const Vector &accel1 = theNodes[0]->getTrialAccel(); const Vector &accel2 = theNodes[1]->getTrialAccel(); const double m = 0.5*mass; for(int i=0; i<2; i++) { theVector(i)+= m * accel1(i); theVector(i+3)+= m * accel2(i); } } return theVector; } //! @brief Send members through the channel being passed as parameter. int XC::FlatSliderSimple2d::sendData(CommParameters &cp) { int res= FrictionElementBase::sendData(cp); res+= cp.sendDoubles(ubPlastic,ubPlasticC,getDbTagData(),CommMetaData(19)); return res; } //! @brief Receives members through the channel being passed as parameter. 
int XC::FlatSliderSimple2d::recvData(const CommParameters &cp) { int res= FrictionElementBase::recvData(cp); res+= cp.receiveDoubles(ubPlastic,ubPlasticC,getDbTagData(),CommMetaData(19)); return res; } int XC::FlatSliderSimple2d::sendSelf(CommParameters &cp) { inicComm(20); int res= sendData(cp); const int dataTag= getDbTag(); res += cp.sendIdData(getDbTagData(),dataTag); if(res < 0) std::cerr << "ZeroLength::sendSelf -- failed to send ID data\n"; return res; } int XC::FlatSliderSimple2d::recvSelf(const CommParameters &cp) { inicComm(20); const int dataTag= getDbTag(); int res= cp.receiveIdData(getDbTagData(),dataTag); if(res<0) std::cerr << "ZeroLength::recvSelf -- failed to receive ID data\n"; else res+= recvData(cp); return res; } void XC::FlatSliderSimple2d::Print(std::ostream &s, int flag) { if (flag == 0) { // print everything s << "Element: " << this->getTag(); //s << " type: FlatSliderSimple2d iNode: " << connectedExternalNodes(0); //s << " jNode: " << connectedExternalNodes(1) << std::endl; s << " FrictionModel: " << theFrnMdl->getTag() << std::endl; s << " uy: " << uy << std::endl; s << " Material ux: " << physicalProperties[0]->getTag() << std::endl; s << " Material rz: " << physicalProperties[1]->getTag() << std::endl; s << " mass: " << mass << " maxIter: " << maxIter << " tol: " << tol << std::endl; // determine resisting forces in global system s << " resisting force: " << this->getResistingForce() << std::endl; } else if (flag == 1) { // does nothing } } XC::Response *XC::FlatSliderSimple2d::setResponse(const std::vector<std::string> &argv, Information &eleInformation) { Response *theResponse = 0; // output.tag("ElementOutput"); // output.attr("eleType","FlatSliderSimple2d"); // output.attr("eleTag",this->getTag()); // output.attr("node1",connectedExternalNodes[0]); // output.attr("node2",connectedExternalNodes[1]); // // global forces // if (strcmp(argv[0],"force") == 0 || // strcmp(argv[0],"forces") == 0 || // strcmp(argv[0],"globalForce") == 0 || // strcmp(argv[0],"globalForces") == 0) // { // output.tag("ResponseType","Px_1"); // output.tag("ResponseType","Py_1"); // output.tag("ResponseType","Mz_1"); // output.tag("ResponseType","Px_2"); // output.tag("ResponseType","Py_2"); // output.tag("ResponseType","Mz_2"); // theResponse = new ElementResponse(this, 1, theVector); // } // // local forces // else if (strcmp(argv[0],"localForce") == 0 || // strcmp(argv[0],"localForces") == 0) // { // output.tag("ResponseType","N_1"); // output.tag("ResponseType","V_1"); // output.tag("ResponseType","M_1"); // output.tag("ResponseType","N_2"); // output.tag("ResponseType","V_2"); // output.tag("ResponseType","M_2"); // theResponse = new ElementResponse(this, 2, theVector); // } // // basic forces // else if (strcmp(argv[0],"basicForce") == 0 || // strcmp(argv[0],"basicForces") == 0) // { // output.tag("ResponseType","qb1"); // output.tag("ResponseType","qb2"); // output.tag("ResponseType","qb3"); // theResponse = new ElementResponse(this, 3, Vector(3)); // } // // local displacements // else if (strcmp(argv[0],"localDisplacement") == 0 || // strcmp(argv[0],"localDisplacements") == 0) // { // output.tag("ResponseType","ux_1"); // output.tag("ResponseType","uy_1"); // output.tag("ResponseType","rz_1"); // output.tag("ResponseType","ux_2"); // output.tag("ResponseType","uy_2"); // output.tag("ResponseType","rz_2"); // theResponse = new ElementResponse(this, 4, theVector); // } // // basic displacements // else if (strcmp(argv[0],"deformation") == 0 || // strcmp(argv[0],"deformations") == 
0 || // strcmp(argv[0],"basicDeformation") == 0 || // strcmp(argv[0],"basicDeformations") == 0 || // strcmp(argv[0],"basicDisplacement") == 0 || // strcmp(argv[0],"basicDisplacements") == 0) // { // output.tag("ResponseType","ub1"); // output.tag("ResponseType","ub2"); // output.tag("ResponseType","ub3"); // theResponse = new ElementResponse(this, 5, Vector(3)); // } // // material output // else if (strcmp(argv[0],"material") == 0) { // if (argc > 2) { // int matNum = atoi(argv[1]); // if (matNum >= 1 && matNum <= 2) // theResponse = physicalProperties[matNum-1]->setResponse(&argv[2], argc-2, output); // } // } // output.endTag(); // ElementOutput return theResponse; } int XC::FlatSliderSimple2d::getResponse(int responseID, Information &eleInfo) { double MpDelta; switch (responseID) { case 1: // global forces return eleInfo.setVector(this->getResistingForce()); case 2: // local forces theVector.Zero(); // determine resisting forces in local system theVector = Tlb^qb; // add P-Delta moments MpDelta = qb(0)*(ul(4)-ul(1)); theVector(2) += 1.0*MpDelta; //theVector(5) += 0.0*MpDelta; return eleInfo.setVector(theVector); case 3: // basic forces return eleInfo.setVector(qb); case 4: // local displacements return eleInfo.setVector(ul); case 5: // basic displacements return eleInfo.setVector(ub); default: return -1; } } // establish the external nodes and set up the transformation matrix for orientation void XC::FlatSliderSimple2d::setUp() { const Vector &end1Crd = theNodes[0]->getCrds(); const Vector &end2Crd = theNodes[1]->getCrds(); Vector xp = end2Crd - end1Crd; L = xp.Norm(); if (L > DBL_EPSILON) { if (x.Size() == 0) { x.resize(3); x(0) = xp(0); x(1) = xp(1); x(2) = 0.0; y.resize(3); y(0) = -x(1); y(1) = x(0); y(2) = 0.0; } else { std::cerr << "WARNING XC::FlatSliderSimple2d::setUp() - " << "element: " << this->getTag() << std::endl << "ignoring nodes and using specified " << "local x vector to determine orientation\n"; } } // check that vectors for orientation are of correct size if (x.Size() != 3 || y.Size() != 3) { std::cerr << "XC::FlatSliderSimple2d::setUp() - " << "element: " << this->getTag() << std::endl << "incorrect dimension of orientation vectors\n"; exit(-1); } // establish orientation of element for the tranformation matrix // z = x cross y Vector z(3); z(0) = x(1)*y(2) - x(2)*y(1); z(1) = x(2)*y(0) - x(0)*y(2); z(2) = x(0)*y(1) - x(1)*y(0); // y = z cross x y(0) = z(1)*x(2) - z(2)*x(1); y(1) = z(2)*x(0) - z(0)*x(2); y(2) = z(0)*x(1) - z(1)*x(0); // compute length(norm) of vectors double xn = x.Norm(); double yn = y.Norm(); double zn = z.Norm(); // check valid x and y vectors, i.e. not parallel and of zero length if (xn == 0 || yn == 0 || zn == 0) { std::cerr << "XC::FlatSliderSimple2d::setUp() - " << "element: " << this->getTag() << std::endl << "invalid orientation vectors\n"; exit(-1); } // create transformation matrix from global to local system Tgl.Zero(); Tgl(0,0) = Tgl(3,3) = x(0)/xn; Tgl(0,1) = Tgl(3,4) = x(1)/xn; Tgl(1,0) = Tgl(4,3) = y(0)/yn; Tgl(1,1) = Tgl(4,4) = y(1)/yn; Tgl(2,2) = Tgl(5,5) = z(2)/zn; // create transformation matrix from local to basic system (linear) Tlb.Zero(); Tlb(0,0) = Tlb(1,1) = Tlb(2,2) = -1.0; Tlb(0,3) = Tlb(1,4) = Tlb(2,5) = 1.0; Tlb(1,5) = -L; } double XC::FlatSliderSimple2d::sgn(double x) { if(x > 0) return 1.0; else if (x < 0) return -1.0; else return 0.0; }<|fim▁end|>
return; }
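The numerical core of FlatSliderSimple2d::update() above is a one-dimensional elastic-predictor/plastic-corrector (return mapping) on the slider's shear force. The sketch below restates just that step in Python; it deliberately drops the element bookkeeping and the normal-force coupling term (the N*ul(2) contribution) that the C++ code applies on top.

# Schematic rendering of the shear return-mapping step; names mirror the
# C++ (qYield, uy, ub, ubPlasticC).
def shear_step(ub, ubPlasticC, qYield, uy):
    k0 = qYield / uy                 # initial stiffness of the hysteretic spring
    qTrial = k0 * (ub - ubPlasticC)  # elastic trial force
    Y = abs(qTrial) - qYield         # yield criterion
    if Y <= 0.0:
        # Elastic step: plastic slip unchanged, tangent stays at k0.
        return qTrial, ubPlasticC, k0
    dGamma = Y / k0                  # consistency parameter
    sign = qTrial / abs(qTrial)
    ubPlastic = ubPlasticC + dGamma * sign
    # Force is mapped back onto the yield surface; the tangent drops to
    # zero while the slider is sliding.
    return qYield * sign, ubPlastic, 0.0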
<|file_name|>cguitreedomdocument.cpp<|end_file_name|><|fim▁begin|>#include <stdio.h> #include <QtDebug><|fim▁hole|> QDomImplementation impl; impl.setInvalidDataPolicy(QDomImplementation::ReturnNullNode); } /** * Get first "guiObject" located in "guiRoot". * * @return Node element of first guiObject or an empty element node if there is none. **/ CGuiTreeDomElement CGuiTreeDomDocument::getFirstGuiObjectElement() { CGuiTreeDomElement domElmGuiTree; domElmGuiTree = this->firstChildElement("guiRoot"); if(domElmGuiTree.isNull()) return(domElmGuiTree); return(domElmGuiTree.firstChildElement("guiObject")); }<|fim▁end|>
#include "cguitreedomdocument.h" CGuiTreeDomDocument::CGuiTreeDomDocument() {
<|file_name|>EventsMap.java<|end_file_name|><|fim▁begin|>/* ********************************************************************** ** ** Copyright notice ** ** ** ** (c) 2005-2009 RSSOwl Development Team ** ** http://www.rssowl.org/ ** ** ** ** All rights reserved ** ** ** ** This program and the accompanying materials are made available under ** ** the terms of the Eclipse Public License v1.0 which accompanies this ** ** distribution, and is available at: ** ** http://www.rssowl.org/legal/epl-v10.html ** ** ** ** A copy is found in the file epl-v10.html and important notices to the ** ** license from the team is found in the textfile LICENSE.txt distributed ** ** in this package. ** ** ** ** This copyright notice MUST APPEAR in all copies of the file! ** ** ** ** Contributors: ** ** RSSOwl Development Team - initial API and implementation ** ** ** ** ********************************************************************** */ package org.rssowl.core.internal.persist.service; import org.rssowl.core.persist.IEntity; import org.rssowl.core.persist.event.ModelEvent; import org.rssowl.core.persist.event.runnable.EventRunnable; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.IdentityHashMap; import java.util.List; import java.util.Map; /** * A {@link Map} of {@link ModelEvent} pointing to {@link EventRunnable}. */ public class EventsMap { private static final EventsMap INSTANCE = new EventsMap(); private static class InternalMap extends HashMap<Class<? extends ModelEvent>, EventRunnable<? extends ModelEvent>> { InternalMap() { super(); } } private final ThreadLocal<InternalMap> fEvents = new ThreadLocal<InternalMap>(); private final ThreadLocal<Map<IEntity, ModelEvent>> fEventTemplatesMap = new ThreadLocal<Map<IEntity, ModelEvent>>(); private EventsMap() { // Enforce singleton pattern } public final static EventsMap getInstance() { return INSTANCE; } public final void putPersistEvent(ModelEvent event) { EventRunnable<? extends ModelEvent> eventRunnable = getEventRunnable(event); eventRunnable.addCheckedPersistEvent(event); } public final void putUpdateEvent(ModelEvent event) { EventRunnable<? extends ModelEvent> eventRunnable = getEventRunnable(event); eventRunnable.addCheckedUpdateEvent(event); } public final void putRemoveEvent(ModelEvent event) { EventRunnable<? extends ModelEvent> eventRunnable = getEventRunnable(event); eventRunnable.addCheckedRemoveEvent(event); } public final boolean containsPersistEvent(Class<? extends ModelEvent> eventClass, IEntity entity) { EventRunnable<? extends ModelEvent> eventRunnable = getEventRunnable(eventClass); return eventRunnable.getPersistEvents().contains(entity); } public final boolean containsUpdateEvent(Class<? extends ModelEvent> eventClass, IEntity entity) { EventRunnable<? extends ModelEvent> eventRunnable = getEventRunnable(eventClass); return eventRunnable.getUpdateEvents().contains(entity); } public final boolean containsRemoveEvent(Class<? extends ModelEvent> eventClass, IEntity entity) { EventRunnable<? extends ModelEvent> eventRunnable = getEventRunnable(eventClass); return eventRunnable.getRemoveEvents().contains(entity); } private EventRunnable<? extends ModelEvent> getEventRunnable(Class<? extends ModelEvent> eventClass) { InternalMap map = fEvents.get(); if (map == null) { map = new InternalMap(); fEvents.set(map); } EventRunnable<? extends ModelEvent> eventRunnable = map.get(eventClass); return eventRunnable; } private EventRunnable<? 
extends ModelEvent> getEventRunnable(ModelEvent event) { Class<? extends ModelEvent> eventClass = event.getClass(); EventRunnable<? extends ModelEvent> eventRunnable = getEventRunnable(eventClass); if (eventRunnable == null) { eventRunnable = event.createEventRunnable(); fEvents.get().put(eventClass, eventRunnable); } return eventRunnable; } public EventRunnable<? extends ModelEvent> removeEventRunnable(Class<? extends ModelEvent> klass) { InternalMap map = fEvents.get(); if (map == null) return null; EventRunnable<? extends ModelEvent> runnable = map.remove(klass); return runnable; } public List<EventRunnable<?>> getEventRunnables() { InternalMap map = fEvents.get(); if (map == null) return new ArrayList<EventRunnable<?>>(0); List<EventRunnable<?>> eventRunnables = new ArrayList<EventRunnable<?>>(map.size()); for (Map.Entry<Class<? extends ModelEvent>, EventRunnable<? extends ModelEvent>> entry : map.entrySet()) { eventRunnables.add(entry.getValue()); } return eventRunnables; } public List<EventRunnable<?>> removeEventRunnables() { InternalMap map = fEvents.get(); if (map == null) return new ArrayList<EventRunnable<?>>(0); List<EventRunnable<?>> eventRunnables = getEventRunnables(); map.clear(); return eventRunnables; } public void putEventTemplate(ModelEvent event) { Map<IEntity, ModelEvent> map = fEventTemplatesMap.get(); if (map == null) { map = new IdentityHashMap<IEntity, ModelEvent>(); fEventTemplatesMap.set(map); } map.put(event.getEntity(), event); } public final Map<IEntity, ModelEvent> getEventTemplatesMap() { Map<IEntity, ModelEvent> map = fEventTemplatesMap.get(); if (map == null) return Collections.emptyMap(); return Collections.unmodifiableMap(fEventTemplatesMap.get()); } public Map<IEntity, ModelEvent> removeEventTemplatesMap() { Map<IEntity, ModelEvent> map = fEventTemplatesMap.get(); fEventTemplatesMap.remove(); return map;<|fim▁hole|> } }<|fim▁end|>
<|file_name|>test_dns.py<|end_file_name|><|fim▁begin|># test-case-name: twisted.names.test.test_dns # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for twisted.names.dns. """ from __future__ import division, absolute_import from io import BytesIO import struct from zope.interface.verify import verifyClass from twisted.python.failure import Failure from twisted.python.util import FancyEqMixin, FancyStrMixin from twisted.internet import address, task from twisted.internet.error import CannotListenError, ConnectionDone from twisted.trial import unittest from twisted.names import dns from twisted.test import proto_helpers from twisted.test.testutils import ComparisonTestsMixin RECORD_TYPES = [ dns.Record_NS, dns.Record_MD, dns.Record_MF, dns.Record_CNAME, dns.Record_MB, dns.Record_MG, dns.Record_MR, dns.Record_PTR, dns.Record_DNAME, dns.Record_A, dns.Record_SOA, dns.Record_NULL, dns.Record_WKS, dns.Record_SRV, dns.Record_AFSDB, dns.Record_RP, dns.Record_HINFO, dns.Record_MINFO, dns.Record_MX, dns.Record_TXT, dns.Record_AAAA, dns.Record_A6, dns.Record_NAPTR, dns.UnknownRecord, ] class Ord2ByteTests(unittest.TestCase): """ Tests for L{dns._ord2bytes}. """ def test_ord2byte(self): """ L{dns._ord2byte} accepts an integer and returns a byte string of length one with an ordinal value equal to the given integer. """ self.assertEqual(b'\x10', dns._ord2bytes(0x10)) class Str2TimeTests(unittest.TestCase): """ Tests for L{dns.str2name}. """ def test_nonString(self): """ When passed a non-string object, L{dns.str2name} returns it unmodified. """ time = object() self.assertIs(time, dns.str2time(time)) def test_seconds(self): """ Passed a string giving a number of seconds, L{dns.str2time} returns the number of seconds represented. For example, C{"10S"} represents C{10} seconds. """ self.assertEqual(10, dns.str2time("10S")) def test_minutes(self): """ Like C{test_seconds}, but for the C{"M"} suffix which multiplies the time value by C{60} (the number of seconds in a minute!). """ self.assertEqual(2 * 60, dns.str2time("2M")) def test_hours(self): """ Like C{test_seconds}, but for the C{"H"} suffix which multiplies the time value by C{3600}, the number of seconds in an hour. """ self.assertEqual(3 * 3600, dns.str2time("3H")) def test_days(self): """ Like L{test_seconds}, but for the C{"D"} suffix which multiplies the time value by C{86400}, the number of seconds in a day. """ self.assertEqual(4 * 86400, dns.str2time("4D")) def test_weeks(self): """ Like L{test_seconds}, but for the C{"W"} suffix which multiplies the time value by C{604800}, the number of seconds in a week. """ self.assertEqual(5 * 604800, dns.str2time("5W")) def test_years(self): """ Like L{test_seconds}, but for the C{"Y"} suffix which multiplies the time value by C{31536000}, the number of seconds in a year. """ self.assertEqual(6 * 31536000, dns.str2time("6Y")) def test_invalidPrefix(self): """ If a non-integer prefix is given, L{dns.str2time} raises L{ValueError}. """ self.assertRaises(ValueError, dns.str2time, "fooS") class NameTests(unittest.TestCase): """ Tests for L{Name}, the representation of a single domain name with support for encoding into and decoding from DNS message format. """ def test_nonStringName(self): """ When constructed with a name which is neither C{bytes} nor C{str}, L{Name} raises L{TypeError}. 
""" self.assertRaises(TypeError, dns.Name, 123) self.assertRaises(TypeError, dns.Name, object()) self.assertRaises(TypeError, dns.Name, []) def test_unicodeName(self): """ L{dns.Name} automatically encodes unicode domain name using C{idna} encoding. """ name = dns.Name(u'\u00e9chec.example.org') self.assertIsInstance(name.name, bytes) self.assertEqual(b'xn--chec-9oa.example.org', name.name) def test_decode(self): """ L{Name.decode} populates the L{Name} instance with name information read from the file-like object passed to it. """ n = dns.Name() n.decode(BytesIO(b"\x07example\x03com\x00")) self.assertEqual(n.name, b"example.com") def test_encode(self): """ L{Name.encode} encodes its name information and writes it to the file-like object passed to it. """ name = dns.Name(b"foo.example.com") stream = BytesIO() name.encode(stream) self.assertEqual(stream.getvalue(), b"\x03foo\x07example\x03com\x00") def test_encodeWithCompression(self): """ If a compression dictionary is passed to it, L{Name.encode} uses offset information from it to encode its name with references to existing labels in the stream instead of including another copy of them in the output. It also updates the compression dictionary with the location of the name it writes to the stream. """ name = dns.Name(b"foo.example.com") compression = {b"example.com": 0x17} # Some bytes already encoded into the stream for this message previous = b"some prefix to change .tell()" stream = BytesIO() stream.write(previous) # The position at which the encoded form of this new name will appear in # the stream. expected = len(previous) + dns.Message.headerSize name.encode(stream, compression) self.assertEqual( b"\x03foo\xc0\x17", stream.getvalue()[len(previous):]) self.assertEqual( {b"example.com": 0x17, b"foo.example.com": expected}, compression) def test_unknown(self): """ A resource record of unknown type and class is parsed into an L{UnknownRecord} instance with its data preserved, and an L{UnknownRecord} instance is serialized to a string equal to the one it was parsed from. 
""" wire = ( b'\x01\x00' # Message ID b'\x00' # answer bit, opCode nibble, auth bit, trunc bit, recursive # bit b'\x00' # recursion bit, empty bit, authenticData bit, # checkingDisabled bit, response code nibble b'\x00\x01' # number of queries b'\x00\x01' # number of answers b'\x00\x00' # number of authorities b'\x00\x01' # number of additionals # query b'\x03foo\x03bar\x00' # foo.bar b'\xde\xad' # type=0xdead b'\xbe\xef' # cls=0xbeef # 1st answer b'\xc0\x0c' # foo.bar - compressed b'\xde\xad' # type=0xdead b'\xbe\xef' # cls=0xbeef b'\x00\x00\x01\x01' # ttl=257 b'\x00\x08somedata' # some payload data # 1st additional b'\x03baz\x03ban\x00' # baz.ban b'\x00\x01' # type=A b'\x00\x01' # cls=IN b'\x00\x00\x01\x01' # ttl=257 b'\x00\x04' # len=4 b'\x01\x02\x03\x04' # 1.2.3.4 ) msg = dns.Message() msg.fromStr(wire) self.assertEqual(msg.queries, [ dns.Query(b'foo.bar', type=0xdead, cls=0xbeef), ]) self.assertEqual(msg.answers, [ dns.RRHeader(b'foo.bar', type=0xdead, cls=0xbeef, ttl=257, payload=dns.UnknownRecord(b'somedata', ttl=257)), ]) self.assertEqual(msg.additional, [ dns.RRHeader(b'baz.ban', type=dns.A, cls=dns.IN, ttl=257, payload=dns.Record_A('1.2.3.4', ttl=257)), ]) enc = msg.toStr() self.assertEqual(enc, wire) def test_decodeWithCompression(self): """ If the leading byte of an encoded label (in bytes read from a stream passed to L{Name.decode}) has its two high bits set, the next byte is treated as a pointer to another label in the stream and that label is included in the name being decoded. """ # Slightly modified version of the example from RFC 1035, section 4.1.4. stream = BytesIO( b"x" * 20 + b"\x01f\x03isi\x04arpa\x00" b"\x03foo\xc0\x14" b"\x03bar\xc0\x20") stream.seek(20) name = dns.Name() name.decode(stream) # Verify we found the first name in the stream and that the stream # position is left at the first byte after the decoded name. self.assertEqual(b"f.isi.arpa", name.name) self.assertEqual(32, stream.tell()) # Get the second name from the stream and make the same assertions. name.decode(stream) self.assertEqual(name.name, b"foo.f.isi.arpa") self.assertEqual(38, stream.tell()) # Get the third and final name name.decode(stream) self.assertEqual(name.name, b"bar.foo.f.isi.arpa") self.assertEqual(44, stream.tell()) def test_rejectCompressionLoop(self): """ L{Name.decode} raises L{ValueError} if the stream passed to it includes a compression pointer which forms a loop, causing the name to be undecodable. """ name = dns.Name() stream = BytesIO(b"\xc0\x00") self.assertRaises(ValueError, name.decode, stream) def test_equality(self): """ L{Name} instances are equal as long as they have the same value for L{Name.name}, regardless of the case. """ name1 = dns.Name(b"foo.bar") name2 = dns.Name(b"foo.bar") self.assertEqual(name1, name2) name3 = dns.Name(b"fOO.bar") self.assertEqual(name1, name3) def test_inequality(self): """ L{Name} instances are not equal as long as they have different L{Name.name} attributes. """ name1 = dns.Name(b"foo.bar") name2 = dns.Name(b"bar.foo") self.assertNotEqual(name1, name2) class RoundtripDNSTests(unittest.TestCase): """ Encoding and then decoding various objects. 
""" names = [b"example.org", b"go-away.fish.tv", b"23strikesback.net"] def test_name(self): for n in self.names: # encode the name f = BytesIO() dns.Name(n).encode(f) # decode the name f.seek(0, 0) result = dns.Name() result.decode(f) self.assertEqual(result.name, n) def test_query(self): """ L{dns.Query.encode} returns a byte string representing the fields of the query which can be decoded into a new L{dns.Query} instance using L{dns.Query.decode}. """ for n in self.names: for dnstype in range(1, 17): for dnscls in range(1, 5): # encode the query f = BytesIO() dns.Query(n, dnstype, dnscls).encode(f) # decode the result f.seek(0, 0) result = dns.Query() result.decode(f) self.assertEqual(result.name.name, n) self.assertEqual(result.type, dnstype) self.assertEqual(result.cls, dnscls) def test_resourceRecordHeader(self): """ L{dns.RRHeader.encode} encodes the record header's information and writes it to the file-like object passed to it and L{dns.RRHeader.decode} reads from a file-like object to re-construct a L{dns.RRHeader} instance. """ # encode the RR f = BytesIO() dns.RRHeader(b"test.org", 3, 4, 17).encode(f) # decode the result f.seek(0, 0) result = dns.RRHeader() result.decode(f) self.assertEqual(result.name, dns.Name(b"test.org")) self.assertEqual(result.type, 3) self.assertEqual(result.cls, 4) self.assertEqual(result.ttl, 17) def test_resources(self): """ L{dns.SimpleRecord.encode} encodes the record's name information and writes it to the file-like object passed to it and L{dns.SimpleRecord.decode} reads from a file-like object to re-construct a L{dns.SimpleRecord} instance. """ names = ( b"this.are.test.name", b"will.compress.will.this.will.name.will.hopefully", b"test.CASE.preSErVatIOn.YeAH", b"a.s.h.o.r.t.c.a.s.e.t.o.t.e.s.t", b"singleton" ) for s in names: f = BytesIO() dns.SimpleRecord(s).encode(f) f.seek(0, 0) result = dns.SimpleRecord() result.decode(f) self.assertEqual(result.name, dns.Name(s)) def test_hashable(self): """ Instances of all record types are hashable. """ for k in RECORD_TYPES: k1, k2 = k(), k() hk1 = hash(k1) hk2 = hash(k2) self.assertEqual(hk1, hk2, "%s != %s (for %s)" % (hk1,hk2,k)) def test_Charstr(self): """ Test L{dns.Charstr} encode and decode. """ for n in self.names: # encode the name f = BytesIO() dns.Charstr(n).encode(f) # decode the name f.seek(0, 0) result = dns.Charstr() result.decode(f) self.assertEqual(result.string, n) def _recordRoundtripTest(self, record): """ Assert that encoding C{record} and then decoding the resulting bytes creates a record which compares equal to C{record}. """ stream = BytesIO() record.encode(stream) length = stream.tell() stream.seek(0, 0) replica = record.__class__() replica.decode(stream, length) self.assertEqual(record, replica) def test_SOA(self): """ The byte stream written by L{dns.Record_SOA.encode} can be used by L{dns.Record_SOA.decode} to reconstruct the state of the original L{dns.Record_SOA} instance. """ self._recordRoundtripTest( dns.Record_SOA( mname=b'foo', rname=b'bar', serial=12, refresh=34, retry=56, expire=78, minimum=90)) def test_A(self): """ The byte stream written by L{dns.Record_A.encode} can be used by L{dns.Record_A.decode} to reconstruct the state of the original L{dns.Record_A} instance. """ self._recordRoundtripTest(dns.Record_A('1.2.3.4')) def test_NULL(self): """ The byte stream written by L{dns.Record_NULL.encode} can be used by L{dns.Record_NULL.decode} to reconstruct the state of the original L{dns.Record_NULL} instance. 
""" self._recordRoundtripTest(dns.Record_NULL(b'foo bar')) def test_WKS(self): """ The byte stream written by L{dns.Record_WKS.encode} can be used by L{dns.Record_WKS.decode} to reconstruct the state of the original L{dns.Record_WKS} instance. """ self._recordRoundtripTest(dns.Record_WKS('1.2.3.4', 3, b'xyz')) def test_AAAA(self): """ The byte stream written by L{dns.Record_AAAA.encode} can be used by L{dns.Record_AAAA.decode} to reconstruct the state of the original L{dns.Record_AAAA} instance. """ self._recordRoundtripTest(dns.Record_AAAA('::1')) def test_A6(self): """ The byte stream written by L{dns.Record_A6.encode} can be used by L{dns.Record_A6.decode} to reconstruct the state of the original L{dns.Record_A6} instance. """ self._recordRoundtripTest(dns.Record_A6(8, '::1:2', b'foo')) def test_SRV(self): """ The byte stream written by L{dns.Record_SRV.encode} can be used by L{dns.Record_SRV.decode} to reconstruct the state of the original L{dns.Record_SRV} instance. """ self._recordRoundtripTest(dns.Record_SRV( priority=1, weight=2, port=3, target=b'example.com')) def test_NAPTR(self): """ Test L{dns.Record_NAPTR} encode and decode. """ naptrs = [ (100, 10, b"u", b"sip+E2U", b"!^.*$!sip:[email protected]!", b""), (100, 50, b"s", b"http+I2L+I2C+I2R", b"", b"_http._tcp.gatech.edu")] for (order, preference, flags, service, regexp, replacement) in naptrs: rin = dns.Record_NAPTR(order, preference, flags, service, regexp, replacement) e = BytesIO() rin.encode(e) e.seek(0, 0) rout = dns.Record_NAPTR() rout.decode(e) self.assertEqual(rin.order, rout.order) self.assertEqual(rin.preference, rout.preference) self.assertEqual(rin.flags, rout.flags) self.assertEqual(rin.service, rout.service) self.assertEqual(rin.regexp, rout.regexp) self.assertEqual(rin.replacement.name, rout.replacement.name) self.assertEqual(rin.ttl, rout.ttl) def test_AFSDB(self): """ The byte stream written by L{dns.Record_AFSDB.encode} can be used by L{dns.Record_AFSDB.decode} to reconstruct the state of the original L{dns.Record_AFSDB} instance. """ self._recordRoundtripTest(dns.Record_AFSDB( subtype=3, hostname=b'example.com')) def test_RP(self): """ The byte stream written by L{dns.Record_RP.encode} can be used by L{dns.Record_RP.decode} to reconstruct the state of the original L{dns.Record_RP} instance. """ self._recordRoundtripTest(dns.Record_RP( mbox=b'alice.example.com', txt=b'example.com')) def test_HINFO(self): """ The byte stream written by L{dns.Record_HINFO.encode} can be used by L{dns.Record_HINFO.decode} to reconstruct the state of the original L{dns.Record_HINFO} instance. """ self._recordRoundtripTest(dns.Record_HINFO(cpu=b'fast', os=b'great')) def test_MINFO(self): """ The byte stream written by L{dns.Record_MINFO.encode} can be used by L{dns.Record_MINFO.decode} to reconstruct the state of the original L{dns.Record_MINFO} instance. """ self._recordRoundtripTest(dns.Record_MINFO( rmailbx=b'foo', emailbx=b'bar')) def test_MX(self): """ The byte stream written by L{dns.Record_MX.encode} can be used by L{dns.Record_MX.decode} to reconstruct the state of the original L{dns.Record_MX} instance. """ self._recordRoundtripTest(dns.Record_MX( preference=1, name=b'example.com')) def test_TXT(self): """ The byte stream written by L{dns.Record_TXT.encode} can be used by L{dns.Record_TXT.decode} to reconstruct the state of the original L{dns.Record_TXT} instance. 
""" self._recordRoundtripTest(dns.Record_TXT(b'foo', b'bar')) MESSAGE_AUTHENTIC_DATA_BYTES = ( b'\x00\x00' # ID b'\x00' # b'\x20' # RA, Z, AD=1, CD, RCODE b'\x00\x00' # Query count b'\x00\x00' # Answer count b'\x00\x00' # Authority count b'\x00\x00' # Additional count ) MESSAGE_CHECKING_DISABLED_BYTES = ( b'\x00\x00' # ID b'\x00' # b'\x10' # RA, Z, AD, CD=1, RCODE b'\x00\x00' # Query count b'\x00\x00' # Answer count b'\x00\x00' # Authority count b'\x00\x00' # Additional count ) class MessageTests(unittest.SynchronousTestCase): """ Tests for L{twisted.names.dns.Message}. """ def test_authenticDataDefault(self): """ L{dns.Message.authenticData} has default value 0. """ self.assertEqual(dns.Message().authenticData, 0) def test_authenticDataOverride(self): """ L{dns.Message.__init__} accepts a C{authenticData} argument which is assigned to L{dns.Message.authenticData}. """ self.assertEqual(dns.Message(authenticData=1).authenticData, 1) def test_authenticDataEncode(self): """ L{dns.Message.toStr} encodes L{dns.Message.authenticData} into byte4 of the byte string. """ self.assertEqual( dns.Message(authenticData=1).toStr(), MESSAGE_AUTHENTIC_DATA_BYTES ) def test_authenticDataDecode(self): """ L{dns.Message.fromStr} decodes byte4 and assigns bit3 to L{dns.Message.authenticData}. """ m = dns.Message() m.fromStr(MESSAGE_AUTHENTIC_DATA_BYTES) self.assertEqual(m.authenticData, 1) def test_checkingDisabledDefault(self): """ L{dns.Message.checkingDisabled} has default value 0. """ self.assertEqual(dns.Message().checkingDisabled, 0) def test_checkingDisabledOverride(self): """ L{dns.Message.__init__} accepts a C{checkingDisabled} argument which is assigned to L{dns.Message.checkingDisabled}. """ self.assertEqual( dns.Message(checkingDisabled=1).checkingDisabled, 1) def test_checkingDisabledEncode(self): """ L{dns.Message.toStr} encodes L{dns.Message.checkingDisabled} into byte4 of the byte string. """ self.assertEqual( dns.Message(checkingDisabled=1).toStr(), MESSAGE_CHECKING_DISABLED_BYTES ) def test_checkingDisabledDecode(self): """ L{dns.Message.fromStr} decodes byte4 and assigns bit4 to L{dns.Message.checkingDisabled}. """ m = dns.Message() m.fromStr(MESSAGE_CHECKING_DISABLED_BYTES) self.assertEqual(m.checkingDisabled, 1) def test_reprDefaults(self): """ L{dns.Message.__repr__} omits field values and sections which are identical to their defaults. The id field value is always shown. """ self.assertEqual( '<Message id=0>', repr(dns.Message()) ) def test_reprFlagsIfSet(self): """ L{dns.Message.__repr__} displays flags if they are L{True}. """ m = dns.Message(answer=True, auth=True, trunc=True, recDes=True, recAv=True, authenticData=True, checkingDisabled=True) self.assertEqual( '<Message ' 'id=0 ' 'flags=answer,auth,trunc,recDes,recAv,authenticData,' 'checkingDisabled' '>', repr(m), ) def test_reprNonDefautFields(self): """ L{dns.Message.__repr__} displays field values if they differ from their defaults. """ m = dns.Message(id=10, opCode=20, rCode=30, maxSize=40) self.assertEqual( '<Message ' 'id=10 ' 'opCode=20 ' 'rCode=30 ' 'maxSize=40' '>', repr(m), ) def test_reprNonDefaultSections(self): """ L{dns.Message.__repr__} displays sections which differ from their defaults. 
""" m = dns.Message() m.queries = [1, 2, 3] m.answers = [4, 5, 6] m.authority = [7, 8, 9] m.additional = [10, 11, 12] self.assertEqual( '<Message ' 'id=0 ' 'queries=[1, 2, 3] ' 'answers=[4, 5, 6] ' 'authority=[7, 8, 9] ' 'additional=[10, 11, 12]' '>', repr(m), ) def test_emptyMessage(self): """ Test that a message which has been truncated causes an EOFError to be raised when it is parsed. """ msg = dns.Message() self.assertRaises(EOFError, msg.fromStr, b'') def test_emptyQuery(self): """ Test that bytes representing an empty query message can be decoded as such. """ msg = dns.Message() msg.fromStr( b'\x01\x00' # Message ID b'\x00' # answer bit, opCode nibble, auth bit, trunc bit, recursive bit b'\x00' # recursion bit, empty bit, authenticData bit, # checkingDisabled bit, response code nibble b'\x00\x00' # number of queries b'\x00\x00' # number of answers b'\x00\x00' # number of authorities b'\x00\x00' # number of additionals ) self.assertEqual(msg.id, 256) self.assertFalse( msg.answer, "Message was not supposed to be an answer.") self.assertEqual(msg.opCode, dns.OP_QUERY) self.assertFalse( msg.auth, "Message was not supposed to be authoritative.") self.assertFalse( msg.trunc, "Message was not supposed to be truncated.") self.assertEqual(msg.queries, []) self.assertEqual(msg.answers, []) self.assertEqual(msg.authority, []) self.assertEqual(msg.additional, []) def test_NULL(self): """ A I{NULL} record with an arbitrary payload can be encoded and decoded as part of a L{dns.Message}. """ bytes = b''.join([dns._ord2bytes(i) for i in range(256)]) rec = dns.Record_NULL(bytes) rr = dns.RRHeader(b'testname', dns.NULL, payload=rec) msg1 = dns.Message() msg1.answers.append(rr) s = BytesIO() msg1.encode(s) s.seek(0, 0) msg2 = dns.Message() msg2.decode(s) self.assertIsInstance(msg2.answers[0].payload, dns.Record_NULL) self.assertEqual(msg2.answers[0].payload.payload, bytes) def test_lookupRecordTypeDefault(self): """ L{Message.lookupRecordType} returns C{dns.UnknownRecord} if it is called with an integer which doesn't correspond to any known record type. """ # 65280 is the first value in the range reserved for private # use, so it shouldn't ever conflict with an officially # allocated value. self.assertIs(dns.Message().lookupRecordType(65280), dns.UnknownRecord) def test_nonAuthoritativeMessage(self): """ The L{RRHeader} instances created by L{Message} from a non-authoritative message are marked as not authoritative. """ buf = BytesIO() answer = dns.RRHeader(payload=dns.Record_A('1.2.3.4', ttl=0)) answer.encode(buf) message = dns.Message() message.fromStr( b'\x01\x00' # Message ID # answer bit, opCode nibble, auth bit, trunc bit, recursive bit b'\x00' # recursion bit, empty bit, authenticData bit, # checkingDisabled bit, response code nibble b'\x00' b'\x00\x00' # number of queries b'\x00\x01' # number of answers b'\x00\x00' # number of authorities b'\x00\x00' # number of additionals + buf.getvalue() ) self.assertEqual(message.answers, [answer]) self.assertFalse(message.answers[0].auth) def test_authoritativeMessage(self): """ The L{RRHeader} instances created by L{Message} from an authoritative message are marked as authoritative. 
""" buf = BytesIO() answer = dns.RRHeader(payload=dns.Record_A('1.2.3.4', ttl=0)) answer.encode(buf) message = dns.Message() message.fromStr( b'\x01\x00' # Message ID # answer bit, opCode nibble, auth bit, trunc bit, recursive bit b'\x04' # recursion bit, empty bit, authenticData bit, # checkingDisabled bit, response code nibble b'\x00' b'\x00\x00' # number of queries b'\x00\x01' # number of answers b'\x00\x00' # number of authorities b'\x00\x00' # number of additionals + buf.getvalue() ) answer.auth = True self.assertEqual(message.answers, [answer]) self.assertTrue(message.answers[0].auth) class MessageComparisonTests(ComparisonTestsMixin, unittest.SynchronousTestCase): """ Tests for the rich comparison of L{dns.Message} instances. """ def messageFactory(self, *args, **kwargs): """ Create a L{dns.Message}. The L{dns.Message} constructor doesn't accept C{queries}, C{answers}, C{authority}, C{additional} arguments, so we extract them from the kwargs supplied to this factory function and assign them to the message. @param args: Positional arguments. @param kwargs: Keyword arguments. @return: A L{dns.Message} instance. """ queries = kwargs.pop('queries', []) answers = kwargs.pop('answers', []) authority = kwargs.pop('authority', []) additional = kwargs.pop('additional', []) m = dns.Message(**kwargs) if queries: m.queries = queries if answers: m.answers = answers if authority: m.authority = authority if additional: m.additional = additional return m def test_id(self): """ Two L{dns.Message} instances compare equal if they have the same id value. """ self.assertNormalEqualityImplementation( self.messageFactory(id=10), self.messageFactory(id=10), self.messageFactory(id=20), ) def test_answer(self): """ Two L{dns.Message} instances compare equal if they have the same answer flag. """ self.assertNormalEqualityImplementation( self.messageFactory(answer=1), self.messageFactory(answer=1), self.messageFactory(answer=0), ) def test_opCode(self): """ Two L{dns.Message} instances compare equal if they have the same opCode value. """ self.assertNormalEqualityImplementation( self.messageFactory(opCode=10), self.messageFactory(opCode=10), self.messageFactory(opCode=20), ) def test_recDes(self): """ Two L{dns.Message} instances compare equal if they have the same recDes flag. """ self.assertNormalEqualityImplementation( self.messageFactory(recDes=1), self.messageFactory(recDes=1), self.messageFactory(recDes=0), ) def test_recAv(self): """ Two L{dns.Message} instances compare equal if they have the same recAv flag. """ self.assertNormalEqualityImplementation( self.messageFactory(recAv=1), self.messageFactory(recAv=1), self.messageFactory(recAv=0), ) def test_auth(self): """ Two L{dns.Message} instances compare equal if they have the same auth flag. """ self.assertNormalEqualityImplementation( self.messageFactory(auth=1), self.messageFactory(auth=1), self.messageFactory(auth=0), ) def test_rCode(self): """ Two L{dns.Message} instances compare equal if they have the same rCode value. """ self.assertNormalEqualityImplementation( self.messageFactory(rCode=10), self.messageFactory(rCode=10), self.messageFactory(rCode=20), ) def test_trunc(self): """ Two L{dns.Message} instances compare equal if they have the same trunc flag. """ self.assertNormalEqualityImplementation( self.messageFactory(trunc=1), self.messageFactory(trunc=1), self.messageFactory(trunc=0), ) def test_maxSize(self): """ Two L{dns.Message} instances compare equal if they have the same maxSize value. 
""" self.assertNormalEqualityImplementation( self.messageFactory(maxSize=10), self.messageFactory(maxSize=10), self.messageFactory(maxSize=20), ) def test_authenticData(self): """ Two L{dns.Message} instances compare equal if they have the same authenticData flag. """ self.assertNormalEqualityImplementation( self.messageFactory(authenticData=1), self.messageFactory(authenticData=1), self.messageFactory(authenticData=0), ) def test_checkingDisabled(self): """ Two L{dns.Message} instances compare equal if they have the same checkingDisabled flag. """ self.assertNormalEqualityImplementation( self.messageFactory(checkingDisabled=1), self.messageFactory(checkingDisabled=1), self.messageFactory(checkingDisabled=0), ) def test_queries(self): """ Two L{dns.Message} instances compare equal if they have the same queries. """ self.assertNormalEqualityImplementation( self.messageFactory(queries=[dns.Query(b'example.com')]), self.messageFactory(queries=[dns.Query(b'example.com')]), self.messageFactory(queries=[dns.Query(b'example.org')]), ) def test_answers(self): """ Two L{dns.Message} instances compare equal if they have the same answers. """ self.assertNormalEqualityImplementation( self.messageFactory(answers=[dns.RRHeader( b'example.com', payload=dns.Record_A('1.2.3.4'))]), self.messageFactory(answers=[dns.RRHeader( b'example.com', payload=dns.Record_A('1.2.3.4'))]), self.messageFactory(answers=[dns.RRHeader( b'example.org', payload=dns.Record_A('4.3.2.1'))]), ) def test_authority(self): """ Two L{dns.Message} instances compare equal if they have the same authority records. """ self.assertNormalEqualityImplementation( self.messageFactory(authority=[dns.RRHeader( b'example.com', type=dns.SOA, payload=dns.Record_SOA())]), self.messageFactory(authority=[dns.RRHeader( b'example.com', type=dns.SOA, payload=dns.Record_SOA())]), self.messageFactory(authority=[dns.RRHeader( b'example.org', type=dns.SOA, payload=dns.Record_SOA())]), ) def test_additional(self): """ Two L{dns.Message} instances compare equal if they have the same additional records. """ self.assertNormalEqualityImplementation( self.messageFactory(additional=[dns.RRHeader( b'example.com', payload=dns.Record_A('1.2.3.4'))]), self.messageFactory(additional=[dns.RRHeader( b'example.com', payload=dns.Record_A('1.2.3.4'))]), self.messageFactory(additional=[dns.RRHeader( b'example.org', payload=dns.Record_A('1.2.3.4'))]), ) class TestController(object): """ Pretend to be a DNS query processor for a DNSDatagramProtocol. @ivar messages: the list of received messages. @type messages: C{list} of (msg, protocol, address) """ def __init__(self): """ Initialize the controller: create a list of messages. """ self.messages = [] def messageReceived(self, msg, proto, addr=None): """ Save the message so that it can be checked during the tests. """ self.messages.append((msg, proto, addr)) class DatagramProtocolTests(unittest.TestCase): """ Test various aspects of L{dns.DNSDatagramProtocol}. """ def setUp(self): """ Create a L{dns.DNSDatagramProtocol} with a deterministic clock. """ self.clock = task.Clock() self.controller = TestController() self.proto = dns.DNSDatagramProtocol(self.controller) transport = proto_helpers.FakeDatagramTransport() self.proto.makeConnection(transport) self.proto.callLater = self.clock.callLater def test_truncatedPacket(self): """ Test that when a short datagram is received, datagramReceived does not raise an exception while processing it. 
""" self.proto.datagramReceived( b'', address.IPv4Address('UDP', '127.0.0.1', 12345)) self.assertEqual(self.controller.messages, []) def test_simpleQuery(self): """ Test content received after a query. """ d = self.proto.query(('127.0.0.1', 21345), [dns.Query(b'foo')]) self.assertEqual(len(self.proto.liveMessages.keys()), 1) m = dns.Message() m.id = next(iter(self.proto.liveMessages.keys())) m.answers = [dns.RRHeader(payload=dns.Record_A(address='1.2.3.4'))] def cb(result): self.assertEqual(result.answers[0].payload.dottedQuad(), '1.2.3.4') d.addCallback(cb) self.proto.datagramReceived(m.toStr(), ('127.0.0.1', 21345)) return d def test_queryTimeout(self): """ Test that query timeouts after some seconds. """ d = self.proto.query(('127.0.0.1', 21345), [dns.Query(b'foo')]) self.assertEqual(len(self.proto.liveMessages), 1) self.clock.advance(10) self.assertFailure(d, dns.DNSQueryTimeoutError) self.assertEqual(len(self.proto.liveMessages), 0) return d def test_writeError(self): """ Exceptions raised by the transport's write method should be turned into C{Failure}s passed to errbacks of the C{Deferred} returned by L{DNSDatagramProtocol.query}. """ def writeError(message, addr): raise RuntimeError("bar") self.proto.transport.write = writeError d = self.proto.query(('127.0.0.1', 21345), [dns.Query(b'foo')]) return self.assertFailure(d, RuntimeError) def test_listenError(self): """ Exception L{CannotListenError} raised by C{listenUDP} should be turned into a C{Failure} passed to errback of the C{Deferred} returned by L{DNSDatagramProtocol.query}. """ def startListeningError(): raise CannotListenError(None, None, None) self.proto.startListening = startListeningError # Clean up transport so that the protocol calls startListening again self.proto.transport = None d = self.proto.query(('127.0.0.1', 21345), [dns.Query(b'foo')]) return self.assertFailure(d, CannotListenError) def test_receiveMessageNotInLiveMessages(self): """ When receiving a message whose id is not in L{DNSDatagramProtocol.liveMessages} or L{DNSDatagramProtocol.resends}, the message will be received by L{DNSDatagramProtocol.controller}. """ message = dns.Message() message.id = 1 message.answers = [dns.RRHeader( payload=dns.Record_A(address='1.2.3.4'))] self.proto.datagramReceived(message.toStr(), ('127.0.0.1', 21345)) self.assertEqual(self.controller.messages[-1][0].toStr(), message.toStr()) class TestTCPController(TestController): """ Pretend to be a DNS query processor for a DNSProtocol. @ivar connections: A list of L{DNSProtocol} instances which have notified this controller that they are connected and have not yet notified it that their connection has been lost. """ def __init__(self): TestController.__init__(self) self.connections = [] def connectionMade(self, proto): self.connections.append(proto) def connectionLost(self, proto): self.connections.remove(proto) class DNSProtocolTests(unittest.TestCase): """ Test various aspects of L{dns.DNSProtocol}. """ def setUp(self): """ Create a L{dns.DNSProtocol} with a deterministic clock. """ self.clock = task.Clock() self.controller = TestTCPController() self.proto = dns.DNSProtocol(self.controller) self.proto.makeConnection(proto_helpers.StringTransport()) self.proto.callLater = self.clock.callLater def test_connectionTracking(self): """ L{dns.DNSProtocol} calls its controller's C{connectionMade} method with itself when it is connected to a transport and its controller's C{connectionLost} method when it is disconnected. 
""" self.assertEqual(self.controller.connections, [self.proto]) self.proto.connectionLost( Failure(ConnectionDone("Fake Connection Done"))) self.assertEqual(self.controller.connections, []) def test_queryTimeout(self): """ Test that query timeouts after some seconds. """ d = self.proto.query([dns.Query(b'foo')]) self.assertEqual(len(self.proto.liveMessages), 1) self.clock.advance(60) self.assertFailure(d, dns.DNSQueryTimeoutError) self.assertEqual(len(self.proto.liveMessages), 0) return d def test_simpleQuery(self): """ Test content received after a query. """ d = self.proto.query([dns.Query(b'foo')]) self.assertEqual(len(self.proto.liveMessages.keys()), 1) m = dns.Message() m.id = next(iter(self.proto.liveMessages.keys())) m.answers = [dns.RRHeader(payload=dns.Record_A(address='1.2.3.4'))] def cb(result): self.assertEqual(result.answers[0].payload.dottedQuad(), '1.2.3.4') d.addCallback(cb) s = m.toStr() s = struct.pack('!H', len(s)) + s self.proto.dataReceived(s) return d def test_writeError(self): """ Exceptions raised by the transport's write method should be turned into C{Failure}s passed to errbacks of the C{Deferred} returned by L{DNSProtocol.query}. """ def writeError(message): raise RuntimeError("bar") self.proto.transport.write = writeError d = self.proto.query([dns.Query(b'foo')]) return self.assertFailure(d, RuntimeError) def test_receiveMessageNotInLiveMessages(self): """ When receiving a message whose id is not in L{DNSProtocol.liveMessages} the message will be received by L{DNSProtocol.controller}. """ message = dns.Message() message.id = 1 message.answers = [dns.RRHeader( payload=dns.Record_A(address='1.2.3.4'))] string = message.toStr() string = struct.pack('!H', len(string)) + string self.proto.dataReceived(string) self.assertEqual(self.controller.messages[-1][0].toStr(), message.toStr()) class ReprTests(unittest.TestCase): """ Tests for the C{__repr__} implementation of record classes. """ def test_ns(self): """ The repr of a L{dns.Record_NS} instance includes the name of the nameserver and the TTL of the record. """ self.assertEqual( repr(dns.Record_NS(b'example.com', 4321)), "<NS name=example.com ttl=4321>") def test_md(self): """ The repr of a L{dns.Record_MD} instance includes the name of the mail destination and the TTL of the record. """ self.assertEqual( repr(dns.Record_MD(b'example.com', 4321)), "<MD name=example.com ttl=4321>") def test_mf(self): """ The repr of a L{dns.Record_MF} instance includes the name of the mail forwarder and the TTL of the record. """ self.assertEqual( repr(dns.Record_MF(b'example.com', 4321)), "<MF name=example.com ttl=4321>") def test_cname(self): """ The repr of a L{dns.Record_CNAME} instance includes the name of the mail forwarder and the TTL of the record. """ self.assertEqual( repr(dns.Record_CNAME(b'example.com', 4321)), "<CNAME name=example.com ttl=4321>") def test_mb(self): """ The repr of a L{dns.Record_MB} instance includes the name of the mailbox and the TTL of the record. """ self.assertEqual( repr(dns.Record_MB(b'example.com', 4321)), "<MB name=example.com ttl=4321>") def test_mg(self): """ The repr of a L{dns.Record_MG} instance includes the name of the mail group member and the TTL of the record. """ self.assertEqual( repr(dns.Record_MG(b'example.com', 4321)), "<MG name=example.com ttl=4321>") def test_mr(self): """ The repr of a L{dns.Record_MR} instance includes the name of the mail rename domain and the TTL of the record. 
""" self.assertEqual( repr(dns.Record_MR(b'example.com', 4321)), "<MR name=example.com ttl=4321>") def test_ptr(self): """ The repr of a L{dns.Record_PTR} instance includes the name of the pointer and the TTL of the record. """ self.assertEqual( repr(dns.Record_PTR(b'example.com', 4321)), "<PTR name=example.com ttl=4321>") def test_dname(self): """ The repr of a L{dns.Record_DNAME} instance includes the name of the non-terminal DNS name redirection and the TTL of the record. """ self.assertEqual( repr(dns.Record_DNAME(b'example.com', 4321)), "<DNAME name=example.com ttl=4321>") def test_a(self): """ The repr of a L{dns.Record_A} instance includes the dotted-quad string representation of the address it is for and the TTL of the record. """ self.assertEqual( repr(dns.Record_A('1.2.3.4', 567)), '<A address=1.2.3.4 ttl=567>') def test_soa(self): """ The repr of a L{dns.Record_SOA} instance includes all of the authority fields. """ self.assertEqual( repr(dns.Record_SOA(mname=b'mName', rname=b'rName', serial=123, refresh=456, retry=789, expire=10, minimum=11, ttl=12)), "<SOA mname=mName rname=rName serial=123 refresh=456 " "retry=789 expire=10 minimum=11 ttl=12>") def test_null(self): """ The repr of a L{dns.Record_NULL} instance includes the repr of its payload and the TTL of the record. """ self.assertEqual( repr(dns.Record_NULL(b'abcd', 123)), "<NULL payload='abcd' ttl=123>") def test_wks(self): """ The repr of a L{dns.Record_WKS} instance includes the dotted-quad string representation of the address it is for, the IP protocol number it is for, and the TTL of the record. """ self.assertEqual( repr(dns.Record_WKS('2.3.4.5', 7, ttl=8)), "<WKS address=2.3.4.5 protocol=7 ttl=8>") def test_aaaa(self): """ The repr of a L{dns.Record_AAAA} instance includes the colon-separated hex string representation of the address it is for and the TTL of the record. """ self.assertEqual( repr(dns.Record_AAAA('8765::1234', ttl=10)), "<AAAA address=8765::1234 ttl=10>") def test_a6(self): """ The repr of a L{dns.Record_A6} instance includes the colon-separated hex string representation of the address it is for and the TTL of the record. """ self.assertEqual( repr(dns.Record_A6(0, '1234::5678', b'foo.bar', ttl=10)), "<A6 suffix=1234::5678 prefix=foo.bar ttl=10>") def test_srv(self): """ The repr of a L{dns.Record_SRV} instance includes the name and port of the target and the priority, weight, and TTL of the record. """ self.assertEqual( repr(dns.Record_SRV(1, 2, 3, b'example.org', 4)), "<SRV priority=1 weight=2 target=example.org port=3 ttl=4>") def test_naptr(self): """ The repr of a L{dns.Record_NAPTR} instance includes the order, preference, flags, service, regular expression, replacement, and TTL of the record. """ record = dns.Record_NAPTR( 5, 9, b"S", b"http", b"/foo/bar/i", b"baz", 3) self.assertEqual( repr(record), "<NAPTR order=5 preference=9 flags=S service=http " "regexp=/foo/bar/i replacement=baz ttl=3>") def test_afsdb(self): """ The repr of a L{dns.Record_AFSDB} instance includes the subtype, hostname, and TTL of the record. """ self.assertEqual( repr(dns.Record_AFSDB(3, b'example.org', 5)), "<AFSDB subtype=3 hostname=example.org ttl=5>") def test_rp(self): """ The repr of a L{dns.Record_RP} instance includes the mbox, txt, and TTL fields of the record. 
""" self.assertEqual( repr(dns.Record_RP(b'alice.example.com', b'admin.example.com', 3)), "<RP mbox=alice.example.com txt=admin.example.com ttl=3>") def test_hinfo(self): """ The repr of a L{dns.Record_HINFO} instance includes the cpu, os, and TTL fields of the record. """ self.assertEqual( repr(dns.Record_HINFO(b'sparc', b'minix', 12)), "<HINFO cpu='sparc' os='minix' ttl=12>") def test_minfo(self): """ The repr of a L{dns.Record_MINFO} instance includes the rmailbx, emailbx, and TTL fields of the record. """ record = dns.Record_MINFO( b'alice.example.com', b'bob.example.com', 15) self.assertEqual( repr(record), "<MINFO responsibility=alice.example.com " "errors=bob.example.com ttl=15>") def test_mx(self): """ The repr of a L{dns.Record_MX} instance includes the preference, name, and TTL fields of the record. """ self.assertEqual( repr(dns.Record_MX(13, b'mx.example.com', 2)), "<MX preference=13 name=mx.example.com ttl=2>") def test_txt(self): """ The repr of a L{dns.Record_TXT} instance includes the data and ttl fields of the record. """ self.assertEqual( repr(dns.Record_TXT(b"foo", b"bar", ttl=15)), "<TXT data=['foo', 'bar'] ttl=15>") def test_spf(self): """ The repr of a L{dns.Record_SPF} instance includes the data and ttl fields of the record. """ self.assertEqual( repr(dns.Record_SPF(b"foo", b"bar", ttl=15)), "<SPF data=['foo', 'bar'] ttl=15>") def test_unknown(self): """ The repr of a L{dns.UnknownRecord} instance includes the data and ttl fields of the record. """ self.assertEqual( repr(dns.UnknownRecord(b"foo\x1fbar", 12)), "<UNKNOWN data='foo\\x1fbar' ttl=12>") class EqualityTests(ComparisonTestsMixin, unittest.TestCase): """ Tests for the equality and non-equality behavior of record classes. """ def _equalityTest(self, firstValueOne, secondValueOne, valueTwo): return self.assertNormalEqualityImplementation( firstValueOne, secondValueOne, valueTwo) def test_charstr(self): """ Two L{dns.Charstr} instances compare equal if and only if they have the same string value. """ self._equalityTest( dns.Charstr(b'abc'), dns.Charstr(b'abc'), dns.Charstr(b'def')) def test_name(self): """ Two L{dns.Name} instances compare equal if and only if they have the same name value. """ self._equalityTest( dns.Name(b'abc'), dns.Name(b'abc'), dns.Name(b'def')) def _simpleEqualityTest(self, cls): """ Assert that instances of C{cls} with the same attributes compare equal to each other and instances with different attributes compare as not equal. @param cls: A L{dns.SimpleRecord} subclass. """ # Vary the TTL self._equalityTest( cls(b'example.com', 123), cls(b'example.com', 123), cls(b'example.com', 321)) # Vary the name self._equalityTest( cls(b'example.com', 123), cls(b'example.com', 123), cls(b'example.org', 123)) def test_rrheader(self): """ Two L{dns.RRHeader} instances compare equal if and only if they have the same name, type, class, time to live, payload, and authoritative bit. """ # Vary the name self._equalityTest( dns.RRHeader(b'example.com', payload=dns.Record_A('1.2.3.4')), dns.RRHeader(b'example.com', payload=dns.Record_A('1.2.3.4')), dns.RRHeader(b'example.org', payload=dns.Record_A('1.2.3.4'))) # Vary the payload self._equalityTest( dns.RRHeader(b'example.com', payload=dns.Record_A('1.2.3.4')), dns.RRHeader(b'example.com', payload=dns.Record_A('1.2.3.4')), dns.RRHeader(b'example.com', payload=dns.Record_A('1.2.3.5'))) # Vary the type. Leave the payload as None so that we don't have to # provide non-equal values. 
        self._equalityTest(
            dns.RRHeader(b'example.com', dns.A),
            dns.RRHeader(b'example.com', dns.A),
            dns.RRHeader(b'example.com', dns.MX))
        # Vary the class.  Probably not likely to come up; most people use
        # the internet.
        self._equalityTest(
            dns.RRHeader(b'example.com', cls=dns.IN,
                         payload=dns.Record_A('1.2.3.4')),
            dns.RRHeader(b'example.com', cls=dns.IN,
                         payload=dns.Record_A('1.2.3.4')),
            dns.RRHeader(b'example.com', cls=dns.CS,
                         payload=dns.Record_A('1.2.3.4')))
        # Vary the ttl
        self._equalityTest(
            dns.RRHeader(b'example.com', ttl=60,
                         payload=dns.Record_A('1.2.3.4')),
            dns.RRHeader(b'example.com', ttl=60,
                         payload=dns.Record_A('1.2.3.4')),
            dns.RRHeader(b'example.com', ttl=120,
                         payload=dns.Record_A('1.2.3.4')))
        # Vary the auth bit
        self._equalityTest(
            dns.RRHeader(b'example.com', auth=1,
                         payload=dns.Record_A('1.2.3.4')),
            dns.RRHeader(b'example.com', auth=1,
                         payload=dns.Record_A('1.2.3.4')),
            dns.RRHeader(b'example.com', auth=0,
                         payload=dns.Record_A('1.2.3.4')))


    def test_ns(self):
        """
        Two L{dns.Record_NS} instances compare equal if and only if they
        have the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_NS)


    def test_md(self):
        """
        Two L{dns.Record_MD} instances compare equal if and only if they
        have the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_MD)


    def test_mf(self):
        """
        Two L{dns.Record_MF} instances compare equal if and only if they
        have the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_MF)


    def test_cname(self):
        """
        Two L{dns.Record_CNAME} instances compare equal if and only if they
        have the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_CNAME)


    def test_mb(self):
        """
        Two L{dns.Record_MB} instances compare equal if and only if they
        have the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_MB)


    def test_mg(self):
        """
        Two L{dns.Record_MG} instances compare equal if and only if they
        have the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_MG)


    def test_mr(self):
        """
        Two L{dns.Record_MR} instances compare equal if and only if they
        have the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_MR)


    def test_ptr(self):
        """
        Two L{dns.Record_PTR} instances compare equal if and only if they
        have the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_PTR)


    def test_dname(self):
        """
        Two L{dns.Record_DNAME} instances compare equal if and only if they
        have the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_DNAME)


    def test_a(self):
        """
        Two L{dns.Record_A} instances compare equal if and only if they
        have the same address and TTL.
        """
        # Vary the TTL
        self._equalityTest(
            dns.Record_A('1.2.3.4', 5),
            dns.Record_A('1.2.3.4', 5),
            dns.Record_A('1.2.3.4', 6))
        # Vary the address
        self._equalityTest(
            dns.Record_A('1.2.3.4', 5),
            dns.Record_A('1.2.3.4', 5),
            dns.Record_A('1.2.3.5', 5))


    def test_soa(self):
        """
        Two L{dns.Record_SOA} instances compare equal if and only if they
        have the same mname, rname, serial, refresh, retry, expire,
        minimum, and ttl.
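        The positional arguments used in the variations below follow the
        RFC 1035 SOA RDATA order (mname, rname, serial, refresh, retry,
        expire, minimum) followed by the record ttl.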
""" # Vary the mname self._equalityTest( dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30), dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30), dns.Record_SOA(b'xname', b'rname', 123, 456, 789, 10, 20, 30)) # Vary the rname self._equalityTest( dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30), dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30), dns.Record_SOA(b'mname', b'xname', 123, 456, 789, 10, 20, 30)) # Vary the serial self._equalityTest( dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30), dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30), dns.Record_SOA(b'mname', b'rname', 1, 456, 789, 10, 20, 30)) # Vary the refresh self._equalityTest( dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30), dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30), dns.Record_SOA(b'mname', b'rname', 123, 1, 789, 10, 20, 30)) # Vary the minimum self._equalityTest( dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30), dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30), dns.Record_SOA(b'mname', b'rname', 123, 456, 1, 10, 20, 30)) # Vary the expire self._equalityTest( dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30), dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30), dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 1, 20, 30)) # Vary the retry self._equalityTest( dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30), dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30), dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 1, 30)) # Vary the ttl self._equalityTest( dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30), dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30), dns.Record_SOA(b'mname', b'xname', 123, 456, 789, 10, 20, 1)) def test_null(self): """ Two L{dns.Record_NULL} instances compare equal if and only if they have the same payload and ttl. """ # Vary the payload self._equalityTest( dns.Record_NULL('foo bar', 10), dns.Record_NULL('foo bar', 10), dns.Record_NULL('bar foo', 10)) # Vary the ttl self._equalityTest( dns.Record_NULL('foo bar', 10), dns.Record_NULL('foo bar', 10), dns.Record_NULL('foo bar', 100)) def test_wks(self): """ Two L{dns.Record_WKS} instances compare equal if and only if they have the same address, protocol, map, and ttl. """ # Vary the address self._equalityTest( dns.Record_WKS('1.2.3.4', 1, 'foo', 2), dns.Record_WKS('1.2.3.4', 1, 'foo', 2), dns.Record_WKS('4.3.2.1', 1, 'foo', 2)) # Vary the protocol self._equalityTest( dns.Record_WKS('1.2.3.4', 1, 'foo', 2), dns.Record_WKS('1.2.3.4', 1, 'foo', 2), dns.Record_WKS('1.2.3.4', 100, 'foo', 2)) # Vary the map self._equalityTest( dns.Record_WKS('1.2.3.4', 1, 'foo', 2), dns.Record_WKS('1.2.3.4', 1, 'foo', 2), dns.Record_WKS('1.2.3.4', 1, 'bar', 2)) # Vary the ttl self._equalityTest( dns.Record_WKS('1.2.3.4', 1, 'foo', 2), dns.Record_WKS('1.2.3.4', 1, 'foo', 2), dns.Record_WKS('1.2.3.4', 1, 'foo', 200)) def test_aaaa(self): """ Two L{dns.Record_AAAA} instances compare equal if and only if they have the same address and ttl. """ # Vary the address self._equalityTest( dns.Record_AAAA('1::2', 1), dns.Record_AAAA('1::2', 1), dns.Record_AAAA('2::1', 1)) # Vary the ttl self._equalityTest( dns.Record_AAAA('1::2', 1), dns.Record_AAAA('1::2', 1), dns.Record_AAAA('1::2', 10)) def test_a6(self): """ Two L{dns.Record_A6} instances compare equal if and only if they have the same prefix, prefix length, suffix, and ttl. 
""" # Note, A6 is crazy, I'm not sure these values are actually legal. # Hopefully that doesn't matter for this test. -exarkun # Vary the prefix length self._equalityTest( dns.Record_A6(16, '::abcd', b'example.com', 10), dns.Record_A6(16, '::abcd', b'example.com', 10), dns.Record_A6(32, '::abcd', b'example.com', 10)) # Vary the suffix self._equalityTest( dns.Record_A6(16, '::abcd', b'example.com', 10), dns.Record_A6(16, '::abcd', b'example.com', 10), dns.Record_A6(16, '::abcd:0', b'example.com', 10)) # Vary the prefix self._equalityTest( dns.Record_A6(16, '::abcd', b'example.com', 10), dns.Record_A6(16, '::abcd', b'example.com', 10), dns.Record_A6(16, '::abcd', b'example.org', 10)) # Vary the ttl self._equalityTest( dns.Record_A6(16, '::abcd', b'example.com', 10), dns.Record_A6(16, '::abcd', b'example.com', 10), dns.Record_A6(16, '::abcd', b'example.com', 100)) def test_srv(self): """ Two L{dns.Record_SRV} instances compare equal if and only if they have the same priority, weight, port, target, and ttl. """ # Vary the priority self._equalityTest( dns.Record_SRV(10, 20, 30, b'example.com', 40), dns.Record_SRV(10, 20, 30, b'example.com', 40), dns.Record_SRV(100, 20, 30, b'example.com', 40)) # Vary the weight self._equalityTest( dns.Record_SRV(10, 20, 30, b'example.com', 40), dns.Record_SRV(10, 20, 30, b'example.com', 40), dns.Record_SRV(10, 200, 30, b'example.com', 40)) # Vary the port self._equalityTest( dns.Record_SRV(10, 20, 30, b'example.com', 40), dns.Record_SRV(10, 20, 30, b'example.com', 40), dns.Record_SRV(10, 20, 300, b'example.com', 40)) # Vary the target self._equalityTest( dns.Record_SRV(10, 20, 30, b'example.com', 40), dns.Record_SRV(10, 20, 30, b'example.com', 40), dns.Record_SRV(10, 20, 30, b'example.org', 40)) # Vary the ttl self._equalityTest( dns.Record_SRV(10, 20, 30, b'example.com', 40), dns.Record_SRV(10, 20, 30, b'example.com', 40), dns.Record_SRV(10, 20, 30, b'example.com', 400)) def test_naptr(self): """ Two L{dns.Record_NAPTR} instances compare equal if and only if they have the same order, preference, flags, service, regexp, replacement, and ttl. 
""" # Vary the order self._equalityTest( dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12), dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12), dns.Record_NAPTR(2, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12)) # Vary the preference self._equalityTest( dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12), dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12), dns.Record_NAPTR(1, 3, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12)) # Vary the flags self._equalityTest( dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12), dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12), dns.Record_NAPTR(1, 2, b"p", b"sip+E2U", b"/foo/bar/", b"baz", 12)) # Vary the service self._equalityTest( dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12), dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12), dns.Record_NAPTR(1, 2, b"u", b"http", b"/foo/bar/", b"baz", 12)) # Vary the regexp self._equalityTest( dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12), dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12), dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/bar/foo/", b"baz", 12)) # Vary the replacement self._equalityTest( dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12), dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12), dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/bar/foo/", b"quux", 12)) # Vary the ttl self._equalityTest( dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12), dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12), dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/bar/foo/", b"baz", 5)) def test_afsdb(self): """ Two L{dns.Record_AFSDB} instances compare equal if and only if they have the same subtype, hostname, and ttl. """ # Vary the subtype self._equalityTest( dns.Record_AFSDB(1, b'example.com', 2), dns.Record_AFSDB(1, b'example.com', 2), dns.Record_AFSDB(2, b'example.com', 2)) # Vary the hostname self._equalityTest( dns.Record_AFSDB(1, b'example.com', 2), dns.Record_AFSDB(1, b'example.com', 2), dns.Record_AFSDB(1, b'example.org', 2)) # Vary the ttl self._equalityTest( dns.Record_AFSDB(1, b'example.com', 2), dns.Record_AFSDB(1, b'example.com', 2), dns.Record_AFSDB(1, b'example.com', 3)) def test_rp(self): """ Two L{Record_RP} instances compare equal if and only if they have the same mbox, txt, and ttl. """ # Vary the mbox self._equalityTest( dns.Record_RP(b'alice.example.com', b'alice is nice', 10), dns.Record_RP(b'alice.example.com', b'alice is nice', 10), dns.Record_RP(b'bob.example.com', b'alice is nice', 10)) # Vary the txt self._equalityTest( dns.Record_RP(b'alice.example.com', b'alice is nice', 10), dns.Record_RP(b'alice.example.com', b'alice is nice', 10), dns.Record_RP(b'alice.example.com', b'alice is not nice', 10)) # Vary the ttl self._equalityTest( dns.Record_RP(b'alice.example.com', b'alice is nice', 10), dns.Record_RP(b'alice.example.com', b'alice is nice', 10), dns.Record_RP(b'alice.example.com', b'alice is nice', 100)) def test_hinfo(self): """ Two L{dns.Record_HINFO} instances compare equal if and only if they have the same cpu, os, and ttl. 
""" # Vary the cpu self._equalityTest( dns.Record_HINFO('x86-64', 'plan9', 10), dns.Record_HINFO('x86-64', 'plan9', 10), dns.Record_HINFO('i386', 'plan9', 10)) # Vary the os self._equalityTest( dns.Record_HINFO('x86-64', 'plan9', 10), dns.Record_HINFO('x86-64', 'plan9', 10), dns.Record_HINFO('x86-64', 'plan11', 10)) # Vary the ttl self._equalityTest( dns.Record_HINFO('x86-64', 'plan9', 10), dns.Record_HINFO('x86-64', 'plan9', 10), dns.Record_HINFO('x86-64', 'plan9', 100)) def test_minfo(self): """ Two L{dns.Record_MINFO} instances compare equal if and only if they have the same rmailbx, emailbx, and ttl. """ # Vary the rmailbx self._equalityTest( dns.Record_MINFO(b'rmailbox', b'emailbox', 10), dns.Record_MINFO(b'rmailbox', b'emailbox', 10), dns.Record_MINFO(b'someplace', b'emailbox', 10)) # Vary the emailbx self._equalityTest( dns.Record_MINFO(b'rmailbox', b'emailbox', 10), dns.Record_MINFO(b'rmailbox', b'emailbox', 10), dns.Record_MINFO(b'rmailbox', b'something', 10)) # Vary the ttl self._equalityTest( dns.Record_MINFO(b'rmailbox', b'emailbox', 10), dns.Record_MINFO(b'rmailbox', b'emailbox', 10), dns.Record_MINFO(b'rmailbox', b'emailbox', 100)) def test_mx(self): """ Two L{dns.Record_MX} instances compare equal if and only if they have the same preference, name, and ttl. """ # Vary the preference self._equalityTest( dns.Record_MX(10, b'example.org', 20), dns.Record_MX(10, b'example.org', 20), dns.Record_MX(100, b'example.org', 20)) # Vary the name self._equalityTest( dns.Record_MX(10, b'example.org', 20), dns.Record_MX(10, b'example.org', 20), dns.Record_MX(10, b'example.net', 20)) # Vary the ttl self._equalityTest( dns.Record_MX(10, b'example.org', 20), dns.Record_MX(10, b'example.org', 20), dns.Record_MX(10, b'example.org', 200)) def test_txt(self): """ Two L{dns.Record_TXT} instances compare equal if and only if they have the same data and ttl. """ # Vary the length of the data self._equalityTest( dns.Record_TXT('foo', 'bar', ttl=10), dns.Record_TXT('foo', 'bar', ttl=10), dns.Record_TXT('foo', 'bar', 'baz', ttl=10)) # Vary the value of the data self._equalityTest( dns.Record_TXT('foo', 'bar', ttl=10), dns.Record_TXT('foo', 'bar', ttl=10), dns.Record_TXT('bar', 'foo', ttl=10)) # Vary the ttl self._equalityTest( dns.Record_TXT('foo', 'bar', ttl=10), dns.Record_TXT('foo', 'bar', ttl=10), dns.Record_TXT('foo', 'bar', ttl=100)) def test_spf(self): """ L{dns.Record_SPF} instances compare equal if and only if they have the same data and ttl. """ # Vary the length of the data self._equalityTest( dns.Record_SPF('foo', 'bar', ttl=10), dns.Record_SPF('foo', 'bar', ttl=10), dns.Record_SPF('foo', 'bar', 'baz', ttl=10)) # Vary the value of the data self._equalityTest( dns.Record_SPF('foo', 'bar', ttl=10), dns.Record_SPF('foo', 'bar', ttl=10), dns.Record_SPF('bar', 'foo', ttl=10)) # Vary the ttl self._equalityTest( dns.Record_SPF('foo', 'bar', ttl=10), dns.Record_SPF('foo', 'bar', ttl=10), dns.Record_SPF('foo', 'bar', ttl=100)) def test_unknown(self): """ L{dns.UnknownRecord} instances compare equal if and only if they have the same data and ttl. 
""" # Vary the length of the data self._equalityTest( dns.UnknownRecord('foo', ttl=10), dns.UnknownRecord('foo', ttl=10), dns.UnknownRecord('foobar', ttl=10)) # Vary the value of the data self._equalityTest( dns.UnknownRecord('foo', ttl=10), dns.UnknownRecord('foo', ttl=10), dns.UnknownRecord('bar', ttl=10)) # Vary the ttl self._equalityTest( dns.UnknownRecord('foo', ttl=10), dns.UnknownRecord('foo', ttl=10), dns.UnknownRecord('foo', ttl=100)) class RRHeaderTests(unittest.TestCase): """ Tests for L{twisted.names.dns.RRHeader}. """ def test_negativeTTL(self): """ Attempting to create a L{dns.RRHeader} instance with a negative TTL causes L{ValueError} to be raised. """ self.assertRaises( ValueError, dns.RRHeader, "example.com", dns.A, dns.IN, -1, dns.Record_A("127.0.0.1")) def test_nonIntegralTTL(self): """ L{dns.RRHeader} converts TTLs to integers. """ ttlAsFloat = 123.45 header = dns.RRHeader("example.com", dns.A, dns.IN, ttlAsFloat, dns.Record_A("127.0.0.1")) self.assertEqual(header.ttl, int(ttlAsFloat)) def test_nonNumericTTLRaisesTypeError(self): """ Attempting to create a L{dns.RRHeader} instance with a TTL that L{int} cannot convert to an integer raises a L{TypeError}. """ self.assertRaises( ValueError, dns.RRHeader, "example.com", dns.A, dns.IN, "this is not a number", dns.Record_A("127.0.0.1")) class NameToLabelsTests(unittest.SynchronousTestCase): """ Tests for L{twisted.names.dns._nameToLabels}. """ def test_empty(self): """ L{dns._nameToLabels} returns a list containing a single empty label for an empty name. """ self.assertEqual(dns._nameToLabels(b''), [b'']) def test_onlyDot(self): """ L{dns._nameToLabels} returns a list containing a single empty label for a name containing only a dot. """ self.assertEqual(dns._nameToLabels(b'.'), [b'']) def test_withoutTrailingDot(self): """ L{dns._nameToLabels} returns a list ending with an empty label for a name without a trailing dot. """ self.assertEqual(dns._nameToLabels(b'com'), [b'com', b'']) def test_withTrailingDot(self): """ L{dns._nameToLabels} returns a list ending with an empty label for a name with a trailing dot. """ self.assertEqual(dns._nameToLabels(b'com.'), [b'com', b'']) def test_subdomain(self): """ L{dns._nameToLabels} returns a list containing entries for all labels in a subdomain name. """ self.assertEqual( dns._nameToLabels(b'foo.bar.baz.example.com.'), [b'foo', b'bar', b'baz', b'example', b'com', b'']) def test_casePreservation(self): """ L{dns._nameToLabels} preserves the case of ascii characters in labels. """ self.assertEqual( dns._nameToLabels(b'EXAMPLE.COM'), [b'EXAMPLE', b'COM', b'']) def assertIsSubdomainOf(testCase, descendant, ancestor): """ Assert that C{descendant} *is* a subdomain of C{ancestor}. @type testCase: L{unittest.SynchronousTestCase} @param testCase: The test case on which to run the assertions. @type descendant: C{str} @param descendant: The subdomain name to test. @type ancestor: C{str} @param ancestor: The superdomain name to test. """ testCase.assertTrue( dns._isSubdomainOf(descendant, ancestor), '%r is not a subdomain of %r' % (descendant, ancestor)) def assertIsNotSubdomainOf(testCase, descendant, ancestor): """ Assert that C{descendant} *is not* a subdomain of C{ancestor}. @type testCase: L{unittest.SynchronousTestCase} @param testCase: The test case on which to run the assertions. @type descendant: C{str} @param descendant: The subdomain name to test. @type ancestor: C{str} @param ancestor: The superdomain name to test. 
""" testCase.assertFalse( dns._isSubdomainOf(descendant, ancestor), '%r is a subdomain of %r' % (descendant, ancestor)) class IsSubdomainOfTests(unittest.SynchronousTestCase): """ Tests for L{twisted.names.dns._isSubdomainOf}. """ def test_identical(self): """ L{dns._isSubdomainOf} returns C{True} for identical domain names. """ assertIsSubdomainOf(self, b'example.com', b'example.com') def test_parent(self): """ L{dns._isSubdomainOf} returns C{True} when the first name is an immediate descendant of the second name. """ assertIsSubdomainOf(self, b'foo.example.com', b'example.com') def test_distantAncestor(self): """ L{dns._isSubdomainOf} returns C{True} when the first name is a distant descendant of the second name. """ assertIsSubdomainOf(self, b'foo.bar.baz.example.com', b'com') def test_superdomain(self): """ L{dns._isSubdomainOf} returns C{False} when the first name is an ancestor of the second name. """ assertIsNotSubdomainOf(self, b'example.com', b'foo.example.com') def test_sibling(self): """ L{dns._isSubdomainOf} returns C{False} if the first name is a sibling of the second name. """ assertIsNotSubdomainOf(self, b'foo.example.com', b'bar.example.com') def test_unrelatedCommonSuffix(self): """ L{dns._isSubdomainOf} returns C{False} even when domain names happen to share a common suffix. """ assertIsNotSubdomainOf(self, b'foo.myexample.com', b'example.com') def test_subdomainWithTrailingDot(self): """ L{dns._isSubdomainOf} returns C{True} if the first name is a subdomain of the second name but the first name has a trailing ".". """ assertIsSubdomainOf(self, b'foo.example.com.', b'example.com') def test_superdomainWithTrailingDot(self): """ L{dns._isSubdomainOf} returns C{True} if the first name is a subdomain of the second name but the second name has a trailing ".". """ assertIsSubdomainOf(self, b'foo.example.com', b'example.com.') def test_bothWithTrailingDot(self): """ L{dns._isSubdomainOf} returns C{True} if the first name is a subdomain of the second name and both names have a trailing ".". """ assertIsSubdomainOf(self, b'foo.example.com.', b'example.com.') def test_emptySubdomain(self): """ L{dns._isSubdomainOf} returns C{False} if the first name is empty and the second name is not. """ assertIsNotSubdomainOf(self, b'', b'example.com') def test_emptySuperdomain(self): """ L{dns._isSubdomainOf} returns C{True} if the second name is empty and the first name is not. """ assertIsSubdomainOf(self, b'foo.example.com', b'') def test_caseInsensitiveComparison(self): """ L{dns._isSubdomainOf} does case-insensitive comparison of name labels. """ assertIsSubdomainOf(self, b'foo.example.com', b'EXAMPLE.COM') assertIsSubdomainOf(self, b'FOO.EXAMPLE.COM', b'example.com') class OPTNonStandardAttributes(object): """ Generate byte and instance representations of an L{dns._OPTHeader} where all attributes are set to non-default values. For testing whether attributes have really been read from the byte string during decoding. """ @classmethod def bytes(cls, excludeName=False, excludeOptions=False): """ Return L{bytes} representing an encoded OPT record. @param excludeName: A flag that controls whether to exclude the name field. This allows a non-standard name to be prepended during the test. @type excludeName: L{bool} @param excludeOptions: A flag that controls whether to exclude the RDLEN field. This allows encoded variable options to be appended during the test. @type excludeOptions: L{bool} @return: L{bytes} representing the encoded OPT record returned by L{object}. 
""" rdlen = b'\x00\x00' # RDLEN 0 if excludeOptions: rdlen = b'' return ( b'\x00' # 0 root zone b'\x00\x29' # type 41 b'\x02\x00' # udpPayloadsize 512 b'\x03' # extendedRCODE 3 b'\x04' # version 4 b'\x80\x00' # DNSSEC OK 1 + Z ) + rdlen @classmethod def object(cls): """ Return a new L{dns._OPTHeader} instance. @return: A L{dns._OPTHeader} instance with attributes that match the encoded record returned by L{bytes}. """ return dns._OPTHeader( udpPayloadSize=512, extendedRCODE=3, version=4, dnssecOK=True) class OPTHeaderTests(ComparisonTestsMixin, unittest.TestCase): """ Tests for L{twisted.names.dns._OPTHeader}. """ def test_interface(self): """ L{dns._OPTHeader} implements L{dns.IEncodable}. """ verifyClass(dns.IEncodable, dns._OPTHeader) def test_name(self): """ L{dns._OPTHeader.name} is an instance attribute whose value is fixed as the root domain """ self.assertEqual(dns._OPTHeader().name, dns.Name(b'')) def test_nameReadonly(self): """ L{dns._OPTHeader.name} is readonly. """ h = dns._OPTHeader() self.assertRaises( AttributeError, setattr, h, 'name', dns.Name(b'example.com')) def test_type(self): """ L{dns._OPTHeader.type} is an instance attribute with fixed value 41. """ self.assertEqual(dns._OPTHeader().type, 41) def test_typeReadonly(self): """ L{dns._OPTHeader.type} is readonly. """ h = dns._OPTHeader() self.assertRaises( AttributeError, setattr, h, 'type', dns.A) def test_udpPayloadSize(self): """ L{dns._OPTHeader.udpPayloadSize} defaults to 4096 as recommended in rfc6891 section-6.2.5. """ self.assertEqual(dns._OPTHeader().udpPayloadSize, 4096) def test_udpPayloadSizeOverride(self): """ L{dns._OPTHeader.udpPayloadSize} can be overridden in the constructor. """ self.assertEqual(dns._OPTHeader(udpPayloadSize=512).udpPayloadSize, 512) def test_extendedRCODE(self): """ L{dns._OPTHeader.extendedRCODE} defaults to 0. """ self.assertEqual(dns._OPTHeader().extendedRCODE, 0) def test_extendedRCODEOverride(self): """ L{dns._OPTHeader.extendedRCODE} can be overridden in the constructor. """ self.assertEqual(dns._OPTHeader(extendedRCODE=1).extendedRCODE, 1) def test_version(self): """ L{dns._OPTHeader.version} defaults to 0. """ self.assertEqual(dns._OPTHeader().version, 0) def test_versionOverride(self): """ L{dns._OPTHeader.version} can be overridden in the constructor. """ self.assertEqual(dns._OPTHeader(version=1).version, 1) def test_dnssecOK(self): """ L{dns._OPTHeader.dnssecOK} defaults to False. """ self.assertFalse(dns._OPTHeader().dnssecOK) def test_dnssecOKOverride(self): """ L{dns._OPTHeader.dnssecOK} can be overridden in the constructor. """ self.assertTrue(dns._OPTHeader(dnssecOK=True).dnssecOK) def test_options(self): """ L{dns._OPTHeader.options} defaults to empty list. """ self.assertEqual(dns._OPTHeader().options, []) def test_optionsOverride(self): """ L{dns._OPTHeader.options} can be overridden in the constructor. """ h = dns._OPTHeader(options=[(1, 1, b'\x00')]) self.assertEqual(h.options, [(1, 1, b'\x00')]) def test_encode(self): """ L{dns._OPTHeader.encode} packs the header fields and writes them to a file like object passed in as an argument. """ b = BytesIO() OPTNonStandardAttributes.object().encode(b) self.assertEqual( b.getvalue(), OPTNonStandardAttributes.bytes() ) def test_encodeWithOptions(self): """ L{dns._OPTHeader.options} is a list of L{dns._OPTVariableOption} instances which are packed into the rdata area of the header. 
""" h = OPTNonStandardAttributes.object() h.options = [ dns._OPTVariableOption(1, b'foobarbaz'), dns._OPTVariableOption(2, b'qux'), ] b = BytesIO() h.encode(b) self.assertEqual( b.getvalue(), OPTNonStandardAttributes.bytes(excludeOptions=True) + ( b'\x00\x14' # RDLEN 20 b'\x00\x01' # OPTION-CODE b'\x00\x09' # OPTION-LENGTH b'foobarbaz' # OPTION-DATA b'\x00\x02' # OPTION-CODE b'\x00\x03' # OPTION-LENGTH b'qux' # OPTION-DATA )) def test_decode(self): """ L{dns._OPTHeader.decode} unpacks the header fields from a file like object and populates the attributes of an existing L{dns._OPTHeader} instance. """ decodedHeader = dns._OPTHeader() decodedHeader.decode(BytesIO(OPTNonStandardAttributes.bytes())) self.assertEqual( decodedHeader, OPTNonStandardAttributes.object()) def test_decodeAllExpectedBytes(self): """ L{dns._OPTHeader.decode} reads all the bytes of the record that is being decoded. """ # Check that all the input data has been consumed. b = BytesIO(OPTNonStandardAttributes.bytes()) decodedHeader = dns._OPTHeader() decodedHeader.decode(b) self.assertEqual(b.tell(), len(b.getvalue())) def test_decodeOnlyExpectedBytes(self): """ L{dns._OPTHeader.decode} reads only the bytes from the current file position to the end of the record that is being decoded. Trailing bytes are not consumed. """ b = BytesIO(OPTNonStandardAttributes.bytes() + b'xxxx') # Trailing bytes decodedHeader = dns._OPTHeader() decodedHeader.decode(b) self.assertEqual(b.tell(), len(b.getvalue())-len(b'xxxx')) def test_decodeDiscardsName(self): """ L{dns._OPTHeader.decode} discards the name which is encoded in the supplied bytes. The name attribute of the resulting L{dns._OPTHeader} instance will always be L{dns.Name(b'')}. """ b = BytesIO(OPTNonStandardAttributes.bytes(excludeName=True) + b'\x07example\x03com\x00') h = dns._OPTHeader() h.decode(b) self.assertEqual(h.name, dns.Name(b'')) def test_decodeRdlengthTooShort(self): """ L{dns._OPTHeader.decode} raises an exception if the supplied RDLEN is too short. """ b = BytesIO( OPTNonStandardAttributes.bytes(excludeOptions=True) + ( b'\x00\x05' # RDLEN 5 Too short - should be 6 b'\x00\x01' # OPTION-CODE b'\x00\x02' # OPTION-LENGTH b'\x00\x00' # OPTION-DATA )) h = dns._OPTHeader() self.assertRaises(EOFError, h.decode, b) def test_decodeRdlengthTooLong(self): """ L{dns._OPTHeader.decode} raises an exception if the supplied RDLEN is too long. """ b = BytesIO( OPTNonStandardAttributes.bytes(excludeOptions=True) + ( <|fim▁hole|> b'\x00\x02' # OPTION-LENGTH b'\x00\x00' # OPTION-DATA )) h = dns._OPTHeader() self.assertRaises(EOFError, h.decode, b) def test_decodeWithOptions(self): """ If the OPT bytes contain variable options, L{dns._OPTHeader.decode} will populate a list L{dns._OPTHeader.options} with L{dns._OPTVariableOption} instances. """ b = BytesIO( OPTNonStandardAttributes.bytes(excludeOptions=True) + ( b'\x00\x14' # RDLEN 20 b'\x00\x01' # OPTION-CODE b'\x00\x09' # OPTION-LENGTH b'foobarbaz' # OPTION-DATA b'\x00\x02' # OPTION-CODE b'\x00\x03' # OPTION-LENGTH b'qux' # OPTION-DATA )) h = dns._OPTHeader() h.decode(b) self.assertEqual( h.options, [dns._OPTVariableOption(1, b'foobarbaz'), dns._OPTVariableOption(2, b'qux'),] ) def test_fromRRHeader(self): """ L{_OPTHeader.fromRRHeader} accepts an L{RRHeader} instance and returns an L{_OPTHeader} instance whose attribute values have been derived from the C{cls}, C{ttl} and C{payload} attributes of the original header. 
""" genericHeader = dns.RRHeader( b'example.com', type=dns.OPT, cls=0xffff, ttl=(0xfe << 24 | 0xfd << 16 | True << 15), payload=dns.UnknownRecord(b'\xff\xff\x00\x03abc')) decodedOptHeader = dns._OPTHeader.fromRRHeader(genericHeader) expectedOptHeader = dns._OPTHeader( udpPayloadSize=0xffff, extendedRCODE=0xfe, version=0xfd, dnssecOK=True, options=[dns._OPTVariableOption(code=0xffff, data=b'abc')]) self.assertEqual(decodedOptHeader, expectedOptHeader) def test_repr(self): """ L{dns._OPTHeader.__repr__} displays the name and type and all the fixed and extended header values of the OPT record. """ self.assertEqual( repr(dns._OPTHeader()), '<_OPTHeader ' 'name= ' 'type=41 ' 'udpPayloadSize=4096 ' 'extendedRCODE=0 ' 'version=0 ' 'dnssecOK=False ' 'options=[]>') def test_equalityUdpPayloadSize(self): """ Two L{OPTHeader} instances compare equal if they have the same udpPayloadSize. """ self.assertNormalEqualityImplementation( dns._OPTHeader(udpPayloadSize=512), dns._OPTHeader(udpPayloadSize=512), dns._OPTHeader(udpPayloadSize=4096)) def test_equalityExtendedRCODE(self): """ Two L{OPTHeader} instances compare equal if they have the same extendedRCODE. """ self.assertNormalEqualityImplementation( dns._OPTHeader(extendedRCODE=1), dns._OPTHeader(extendedRCODE=1), dns._OPTHeader(extendedRCODE=2)) def test_equalityVersion(self): """ Two L{OPTHeader} instances compare equal if they have the same version. """ self.assertNormalEqualityImplementation( dns._OPTHeader(version=1), dns._OPTHeader(version=1), dns._OPTHeader(version=2)) def test_equalityDnssecOK(self): """ Two L{OPTHeader} instances compare equal if they have the same dnssecOK flags. """ self.assertNormalEqualityImplementation( dns._OPTHeader(dnssecOK=True), dns._OPTHeader(dnssecOK=True), dns._OPTHeader(dnssecOK=False)) def test_equalityOptions(self): """ Two L{OPTHeader} instances compare equal if they have the same options. """ self.assertNormalEqualityImplementation( dns._OPTHeader(options=[dns._OPTVariableOption(1, b'x')]), dns._OPTHeader(options=[dns._OPTVariableOption(1, b'x')]), dns._OPTHeader(options=[dns._OPTVariableOption(2, b'y')])) class OPTVariableOptionTests(ComparisonTestsMixin, unittest.TestCase): """ Tests for L{dns._OPTVariableOption}. """ def test_interface(self): """ L{dns._OPTVariableOption} implements L{dns.IEncodable}. """ verifyClass(dns.IEncodable, dns._OPTVariableOption) def test_constructorArguments(self): """ L{dns._OPTVariableOption.__init__} requires code and data arguments which are saved as public instance attributes. """ h = dns._OPTVariableOption(1, b'x') self.assertEqual(h.code, 1) self.assertEqual(h.data, b'x') def test_repr(self): """ L{dns._OPTVariableOption.__repr__} displays the code and data of the option. """ self.assertEqual( repr(dns._OPTVariableOption(1, b'x')), '<_OPTVariableOption ' 'code=1 ' "data=x" '>') def test_equality(self): """ Two OPTVariableOption instances compare equal if they have the same code and data values. """ self.assertNormalEqualityImplementation( dns._OPTVariableOption(1, b'x'), dns._OPTVariableOption(1, b'x'), dns._OPTVariableOption(2, b'x')) self.assertNormalEqualityImplementation( dns._OPTVariableOption(1, b'x'), dns._OPTVariableOption(1, b'x'), dns._OPTVariableOption(1, b'y')) def test_encode(self): """ L{dns._OPTVariableOption.encode} encodes the code and data instance attributes to a byte string which also includes the data length. 
""" o = dns._OPTVariableOption(1, b'foobar') b = BytesIO() o.encode(b) self.assertEqual( b.getvalue(), b'\x00\x01' # OPTION-CODE 1 b'\x00\x06' # OPTION-LENGTH 6 b'foobar' # OPTION-DATA ) def test_decode(self): """ L{dns._OPTVariableOption.decode} is a classmethod that decodes a byte string and returns a L{dns._OPTVariableOption} instance. """ b = BytesIO( b'\x00\x01' # OPTION-CODE 1 b'\x00\x06' # OPTION-LENGTH 6 b'foobar' # OPTION-DATA ) o = dns._OPTVariableOption() o.decode(b) self.assertEqual(o.code, 1) self.assertEqual(o.data, b'foobar') class RaisedArgs(Exception): """ An exception which can be raised by fakes to test that the fake is called with expected arguments. """ def __init__(self, args, kwargs): """ Store the positional and keyword arguments as attributes. @param args: The positional args. @param kwargs: The keyword args. """ self.args = args self.kwargs = kwargs class MessageEmpty(object): """ Generate byte string and constructor arguments for an empty L{dns._EDNSMessage}. """ @classmethod def bytes(cls): """ Bytes which are expected when encoding an instance constructed using C{kwargs} and which are expected to result in an identical instance when decoded. @return: The L{bytes} of a wire encoded message. """ return ( b'\x01\x00' # id: 256 b'\x97' # QR: 1, OPCODE: 2, AA: 0, TC: 0, RD: 1 b'\x8f' # RA: 1, Z, RCODE: 15 b'\x00\x00' # number of queries b'\x00\x00' # number of answers b'\x00\x00' # number of authorities b'\x00\x00' # number of additionals ) @classmethod def kwargs(cls): """ Keyword constructor arguments which are expected to result in an instance which returns C{bytes} when encoded. @return: A L{dict} of keyword arguments. """ return dict( id=256, answer=True, opCode=dns.OP_STATUS, auth=True, trunc=True, recDes=True, recAv=True, rCode=15, ednsVersion=None, ) class MessageTruncated(object): """ An empty response message whose TR bit is set to 1. """ @classmethod def bytes(cls): """ Bytes which are expected when encoding an instance constructed using C{kwargs} and which are expected to result in an identical instance when decoded. @return: The L{bytes} of a wire encoded message. """ return ( b'\x01\x00' # ID: 256 b'\x82' # QR: 1, OPCODE: 0, AA: 0, TC: 1, RD: 0 b'\x00' # RA: 0, Z, RCODE: 0 b'\x00\x00' # Number of queries b'\x00\x00' # Number of answers b'\x00\x00' # Number of authorities b'\x00\x00' # Number of additionals ) @classmethod def kwargs(cls): """ Keyword constructor arguments which are expected to result in an instance which returns C{bytes} when encoded. @return: A L{dict} of keyword arguments. """ return dict( id=256, answer=1, opCode=0, auth=0, trunc=1, recDes=0, recAv=0, rCode=0, ednsVersion=None,) class MessageNonAuthoritative(object): """ A minimal non-authoritative message. """ @classmethod def bytes(cls): """ Bytes which are expected when encoding an instance constructed using C{kwargs} and which are expected to result in an identical instance when decoded. @return: The L{bytes} of a wire encoded message. 
""" return ( b'\x01\x00' # ID 256 b'\x00' # QR: 0, OPCODE: 0, AA: 0, TC: 0, RD: 0 b'\x00' # RA: 0, Z, RCODE: 0 b'\x00\x00' # Query count b'\x00\x01' # Answer count b'\x00\x00' # Authorities count b'\x00\x00' # Additionals count # Answer b'\x00' # RR NAME (root) b'\x00\x01' # RR TYPE 1 (A) b'\x00\x01' # RR CLASS 1 (IN) b'\x00\x00\x00\x00' # RR TTL b'\x00\x04' # RDLENGTH 4 b'\x01\x02\x03\x04' # IPv4 1.2.3.4 ) @classmethod def kwargs(cls): """ Keyword constructor arguments which are expected to result in an instance which returns C{bytes} when encoded. @return: A L{dict} of keyword arguments. """ return dict( id=256, auth=0, ednsVersion=None, answers=[ dns.RRHeader( b'', payload=dns.Record_A('1.2.3.4', ttl=0), auth=False)]) class MessageAuthoritative(object): """ A minimal authoritative message. """ @classmethod def bytes(cls): """ Bytes which are expected when encoding an instance constructed using C{kwargs} and which are expected to result in an identical instance when decoded. @return: The L{bytes} of a wire encoded message. """ return ( b'\x01\x00' # ID: 256 b'\x04' # QR: 0, OPCODE: 0, AA: 1, TC: 0, RD: 0 b'\x00' # RA: 0, Z, RCODE: 0 b'\x00\x00' # Query count b'\x00\x01' # Answer count b'\x00\x00' # Authorities count b'\x00\x00' # Additionals count # Answer b'\x00' # RR NAME (root) b'\x00\x01' # RR TYPE 1 (A) b'\x00\x01' # RR CLASS 1 (IN) b'\x00\x00\x00\x00' # RR TTL b'\x00\x04' # RDLENGTH 4 b'\x01\x02\x03\x04' # IPv4 1.2.3.4 ) @classmethod def kwargs(cls): """ Keyword constructor arguments which are expected to result in an instance which returns C{bytes} when encoded. @return: A L{dict} of keyword arguments. """ return dict( id=256, auth=1, ednsVersion=None, answers=[ dns.RRHeader( b'', payload=dns.Record_A('1.2.3.4', ttl=0), auth=True)]) class MessageComplete: """ An example of a fully populated non-edns response message. Contains name compression, answers, authority, and additional records. """ @classmethod def bytes(cls): """ Bytes which are expected when encoding an instance constructed using C{kwargs} and which are expected to result in an identical instance when decoded. @return: The L{bytes} of a wire encoded message. 
""" return ( b'\x01\x00' # ID: 256 b'\x95' # QR: 1, OPCODE: 2, AA: 1, TC: 0, RD: 1 b'\x8f' # RA: 1, Z, RCODE: 15 b'\x00\x01' # Query count b'\x00\x01' # Answer count b'\x00\x01' # Authorities count b'\x00\x01' # Additionals count # Query begins at Byte 12 b'\x07example\x03com\x00' # QNAME b'\x00\x06' # QTYPE 6 (SOA) b'\x00\x01' # QCLASS 1 (IN) # Answers b'\xc0\x0c' # RR NAME (compression ref b12) b'\x00\x06' # RR TYPE 6 (SOA) b'\x00\x01' # RR CLASS 1 (IN) b'\xff\xff\xff\xff' # RR TTL b'\x00\x27' # RDLENGTH 39 b'\x03ns1\xc0\x0c' # Mname (ns1.example.com (compression ref b15) b'\x0ahostmaster\xc0\x0c' # rname (hostmaster.example.com) b'\xff\xff\xff\xfe' # Serial b'\x7f\xff\xff\xfd' # Refresh b'\x7f\xff\xff\xfc' # Retry b'\x7f\xff\xff\xfb' # Expire b'\xff\xff\xff\xfa' # Minimum # Authority b'\xc0\x0c' # RR NAME (example.com compression ref b12) b'\x00\x02' # RR TYPE 2 (NS) b'\x00\x01' # RR CLASS 1 (IN) b'\xff\xff\xff\xff' # RR TTL b'\x00\x02' # RDLENGTH b'\xc0\x29' # RDATA (ns1.example.com (compression ref b41) # Additional b'\xc0\x29' # RR NAME (ns1.example.com compression ref b41) b'\x00\x01' # RR TYPE 1 (A) b'\x00\x01' # RR CLASS 1 (IN) b'\xff\xff\xff\xff' # RR TTL b'\x00\x04' # RDLENGTH b'\x05\x06\x07\x08' # RDATA 5.6.7.8 ) @classmethod def kwargs(cls): """ Keyword constructor arguments which are expected to result in an instance which returns C{bytes} when encoded. @return: A L{dict} of keyword arguments. """ return dict( id=256, answer=1, opCode=dns.OP_STATUS, auth=1, recDes=1, recAv=1, rCode=15, ednsVersion=None, queries=[dns.Query(b'example.com', dns.SOA)], answers=[ dns.RRHeader( b'example.com', type=dns.SOA, ttl=0xffffffff, auth=True, payload=dns.Record_SOA( ttl=0xffffffff, mname=b'ns1.example.com', rname=b'hostmaster.example.com', serial=0xfffffffe, refresh=0x7ffffffd, retry=0x7ffffffc, expire=0x7ffffffb, minimum=0xfffffffa, ))], authority=[ dns.RRHeader( b'example.com', type=dns.NS, ttl=0xffffffff, auth=True, payload=dns.Record_NS( 'ns1.example.com', ttl=0xffffffff))], additional=[ dns.RRHeader( b'ns1.example.com', type=dns.A, ttl=0xffffffff, auth=True, payload=dns.Record_A( '5.6.7.8', ttl=0xffffffff))]) class MessageEDNSQuery(object): """ A minimal EDNS query message. """ @classmethod def bytes(cls): """ Bytes which are expected when encoding an instance constructed using C{kwargs} and which are expected to result in an identical instance when decoded. @return: The L{bytes} of a wire encoded message. """ return ( b'\x00\x00' # ID: 0 b'\x00' # QR: 0, OPCODE: 0, AA: 0, TC: 0, RD: 0 b'\x00' # RA: 0, Z, RCODE: 0 b'\x00\x01' # Queries count b'\x00\x00' # Anwers count b'\x00\x00' # Authority count b'\x00\x01' # Additionals count # Queries b'\x03www\x07example\x03com\x00' # QNAME b'\x00\x01' # QTYPE (A) b'\x00\x01' # QCLASS (IN) # Additional OPT record b'\x00' # NAME (.) b'\x00\x29' # TYPE (OPT 41) b'\x10\x00' # UDP Payload Size (4096) b'\x00' # Extended RCODE b'\x03' # EDNS version b'\x00\x00' # DO: False + Z b'\x00\x00' # RDLENGTH ) @classmethod def kwargs(cls): """ Keyword constructor arguments which are expected to result in an instance which returns C{bytes} when encoded. @return: A L{dict} of keyword arguments. """ return dict( id=0, answer=0, opCode=dns.OP_QUERY, auth=0, recDes=0, recAv=0, rCode=0, ednsVersion=3, dnssecOK=False, queries=[dns.Query(b'www.example.com', dns.A)], additional=[]) class MessageEDNSComplete(object): """ An example of a fully populated edns response message. Contains name compression, answers, authority, and additional records. 
""" @classmethod def bytes(cls): """ Bytes which are expected when encoding an instance constructed using C{kwargs} and which are expected to result in an identical instance when decoded. @return: The L{bytes} of a wire encoded message. """ return ( b'\x01\x00' # ID: 256 b'\x95' # QR: 1, OPCODE: 2, AA: 1, TC: 0, RD: 1 b'\xbf' # RA: 1, AD: 1, RCODE: 15 b'\x00\x01' # Query count b'\x00\x01' # Answer count b'\x00\x01' # Authorities count b'\x00\x02' # Additionals count # Query begins at Byte 12 b'\x07example\x03com\x00' # QNAME b'\x00\x06' # QTYPE 6 (SOA) b'\x00\x01' # QCLASS 1 (IN) # Answers b'\xc0\x0c' # RR NAME (compression ref b12) b'\x00\x06' # RR TYPE 6 (SOA) b'\x00\x01' # RR CLASS 1 (IN) b'\xff\xff\xff\xff' # RR TTL b'\x00\x27' # RDLENGTH 39 b'\x03ns1\xc0\x0c' # mname (ns1.example.com (compression ref b15) b'\x0ahostmaster\xc0\x0c' # rname (hostmaster.example.com) b'\xff\xff\xff\xfe' # Serial b'\x7f\xff\xff\xfd' # Refresh b'\x7f\xff\xff\xfc' # Retry b'\x7f\xff\xff\xfb' # Expire b'\xff\xff\xff\xfa' # Minimum # Authority b'\xc0\x0c' # RR NAME (example.com compression ref b12) b'\x00\x02' # RR TYPE 2 (NS) b'\x00\x01' # RR CLASS 1 (IN) b'\xff\xff\xff\xff' # RR TTL b'\x00\x02' # RDLENGTH b'\xc0\x29' # RDATA (ns1.example.com (compression ref b41) # Additional b'\xc0\x29' # RR NAME (ns1.example.com compression ref b41) b'\x00\x01' # RR TYPE 1 (A) b'\x00\x01' # RR CLASS 1 (IN) b'\xff\xff\xff\xff' # RR TTL b'\x00\x04' # RDLENGTH b'\x05\x06\x07\x08' # RDATA 5.6.7.8 # Additional OPT record b'\x00' # NAME (.) b'\x00\x29' # TYPE (OPT 41) b'\x04\x00' # UDP Payload Size (1024) b'\x00' # Extended RCODE b'\x03' # EDNS version b'\x80\x00' # DO: True + Z b'\x00\x00' # RDLENGTH ) @classmethod def kwargs(cls): """ Keyword constructor arguments which are expected to result in an instance which returns C{bytes} when encoded. @return: A L{dict} of keyword arguments. """ return dict( id=256, answer=1, opCode=dns.OP_STATUS, auth=1, trunc=0, recDes=1, recAv=1, rCode=15, ednsVersion=3, dnssecOK=True, authenticData=True, checkingDisabled=True, maxSize=1024, queries=[dns.Query(b'example.com', dns.SOA)], answers=[ dns.RRHeader( b'example.com', type=dns.SOA, ttl=0xffffffff, auth=True, payload=dns.Record_SOA( ttl=0xffffffff, mname=b'ns1.example.com', rname=b'hostmaster.example.com', serial=0xfffffffe, refresh=0x7ffffffd, retry=0x7ffffffc, expire=0x7ffffffb, minimum=0xfffffffa, ))], authority=[ dns.RRHeader( b'example.com', type=dns.NS, ttl=0xffffffff, auth=True, payload=dns.Record_NS( 'ns1.example.com', ttl=0xffffffff))], additional=[ dns.RRHeader( b'ns1.example.com', type=dns.A, ttl=0xffffffff, auth=True, payload=dns.Record_A( '5.6.7.8', ttl=0xffffffff))]) class MessageEDNSExtendedRCODE(object): """ An example of an EDNS message with an extended RCODE. """ @classmethod def bytes(cls): """ Bytes which are expected when encoding an instance constructed using C{kwargs} and which are expected to result in an identical instance when decoded. @return: The L{bytes} of a wire encoded message. """ return ( b'\x00\x00' b'\x00' b'\x0c' # RA: 0, Z, RCODE: 12 b'\x00\x00' b'\x00\x00' b'\x00\x00' b'\x00\x01' # 1 additionals # Additional OPT record b'\x00' b'\x00\x29' b'\x10\x00' b'\xab' # Extended RCODE: 171 b'\x00' b'\x00\x00' b'\x00\x00' ) @classmethod def kwargs(cls): """ Keyword constructor arguments which are expected to result in an instance which returns C{bytes} when encoded. @return: A L{dict} of keyword arguments. 
""" return dict( id=0, answer=False, opCode=dns.OP_QUERY, auth=False, trunc=False, recDes=False, recAv=False, rCode=0xabc, # Combined OPT extended RCODE + Message RCODE ednsVersion=0, dnssecOK=False, maxSize=4096, queries=[], answers=[], authority=[], additional=[], ) class MessageComparable(FancyEqMixin, FancyStrMixin, object): """ A wrapper around L{dns.Message} which is comparable so that it can be tested using some of the L{dns._EDNSMessage} tests. """ showAttributes = compareAttributes = ( 'id', 'answer', 'opCode', 'auth', 'trunc', 'recDes', 'recAv', 'rCode', 'queries', 'answers', 'authority', 'additional') def __init__(self, original): self.original = original def __getattr__(self, key): return getattr(self.original, key) def verifyConstructorArgument(testCase, cls, argName, defaultVal, altVal, attrName=None): """ Verify that an attribute has the expected default value and that a corresponding argument passed to a constructor is assigned to that attribute. @param testCase: The L{TestCase} whose assert methods will be called. @type testCase: L{unittest.TestCase} @param cls: The constructor under test. @type cls: L{type} @param argName: The name of the constructor argument under test. @type argName: L{str} @param defaultVal: The expected default value of C{attrName} / C{argName} @type defaultVal: L{object} @param altVal: A value which is different from the default. Used to test that supplied constructor arguments are actually assigned to the correct attribute. @type altVal: L{object} @param attrName: The name of the attribute under test if different from C{argName}. Defaults to C{argName} @type attrName: L{str} """ if attrName is None: attrName = argName actual = {} expected = {'defaultVal': defaultVal, 'altVal': altVal} o = cls() actual['defaultVal'] = getattr(o, attrName) o = cls(**{argName: altVal}) actual['altVal'] = getattr(o, attrName) testCase.assertEqual(expected, actual) class ConstructorTestsMixin(object): """ Helper methods for verifying default attribute values and corresponding constructor arguments. """ def _verifyConstructorArgument(self, argName, defaultVal, altVal): """ Wrap L{verifyConstructorArgument} to provide simpler interface for testing Message and _EDNSMessage constructor arguments. @param argName: The name of the constructor argument. @param defaultVal: The expected default value. @param altVal: An alternative value which is expected to be assigned to a correspondingly named attribute. """ verifyConstructorArgument(testCase=self, cls=self.messageFactory, argName=argName, defaultVal=defaultVal, altVal=altVal) def _verifyConstructorFlag(self, argName, defaultVal): """ Wrap L{verifyConstructorArgument} to provide simpler interface for testing _EDNSMessage constructor flags. @param argName: The name of the constructor flag argument @param defaultVal: The expected default value of the flag """ assert defaultVal in (True, False) verifyConstructorArgument(testCase=self, cls=self.messageFactory, argName=argName, defaultVal=defaultVal, altVal=not defaultVal,) class CommonConstructorTestsMixin(object): """ Tests for constructor arguments and their associated attributes that are common to both L{twisted.names.dns._EDNSMessage} and L{dns.Message}. TestCase classes that use this mixin must provide a C{messageFactory} method which accepts any argment supported by L{dns.Message.__init__}. TestCases must also mixin ConstructorTestsMixin which provides some custom assertions for testing constructor arguments. 
""" def test_id(self): """ L{dns._EDNSMessage.id} defaults to C{0} and can be overridden in the constructor. """ self._verifyConstructorArgument('id', defaultVal=0, altVal=1) def test_answer(self): """ L{dns._EDNSMessage.answer} defaults to C{False} and can be overridden in the constructor. """ self._verifyConstructorFlag('answer', defaultVal=False) def test_opCode(self): """ L{dns._EDNSMessage.opCode} defaults to L{dns.OP_QUERY} and can be overridden in the constructor. """ self._verifyConstructorArgument( 'opCode', defaultVal=dns.OP_QUERY, altVal=dns.OP_STATUS) def test_auth(self): """ L{dns._EDNSMessage.auth} defaults to C{False} and can be overridden in the constructor. """ self._verifyConstructorFlag('auth', defaultVal=False) def test_trunc(self): """ L{dns._EDNSMessage.trunc} defaults to C{False} and can be overridden in the constructor. """ self._verifyConstructorFlag('trunc', defaultVal=False) def test_recDes(self): """ L{dns._EDNSMessage.recDes} defaults to C{False} and can be overridden in the constructor. """ self._verifyConstructorFlag('recDes', defaultVal=False) def test_recAv(self): """ L{dns._EDNSMessage.recAv} defaults to C{False} and can be overridden in the constructor. """ self._verifyConstructorFlag('recAv', defaultVal=False) def test_rCode(self): """ L{dns._EDNSMessage.rCode} defaults to C{0} and can be overridden in the constructor. """ self._verifyConstructorArgument('rCode', defaultVal=0, altVal=123) def test_maxSize(self): """ L{dns._EDNSMessage.maxSize} defaults to C{512} and can be overridden in the constructor. """ self._verifyConstructorArgument('maxSize', defaultVal=512, altVal=1024) def test_queries(self): """ L{dns._EDNSMessage.queries} defaults to C{[]}. """ self.assertEqual(self.messageFactory().queries, []) def test_answers(self): """ L{dns._EDNSMessage.answers} defaults to C{[]}. """ self.assertEqual(self.messageFactory().answers, []) def test_authority(self): """ L{dns._EDNSMessage.authority} defaults to C{[]}. """ self.assertEqual(self.messageFactory().authority, []) def test_additional(self): """ L{dns._EDNSMessage.additional} defaults to C{[]}. """ self.assertEqual(self.messageFactory().additional, []) class EDNSMessageConstructorTests(ConstructorTestsMixin, CommonConstructorTestsMixin, unittest.SynchronousTestCase): """ Tests for L{twisted.names.dns._EDNSMessage} constructor arguments that are shared with L{dns.Message}. """ messageFactory = dns._EDNSMessage class MessageConstructorTests(ConstructorTestsMixin, CommonConstructorTestsMixin, unittest.SynchronousTestCase): """ Tests for L{twisted.names.dns.Message} constructor arguments that are shared with L{dns._EDNSMessage}. """ messageFactory = dns.Message class EDNSMessageSpecificsTests(ConstructorTestsMixin, unittest.SynchronousTestCase): """ Tests for L{dns._EDNSMessage}. These tests are for L{dns._EDNSMessage} APIs which are not shared with L{dns.Message}. """ messageFactory = dns._EDNSMessage def test_ednsVersion(self): """ L{dns._EDNSMessage.ednsVersion} defaults to C{0} and can be overridden in the constructor. """ self._verifyConstructorArgument( 'ednsVersion', defaultVal=0, altVal=None) def test_dnssecOK(self): """ L{dns._EDNSMessage.dnssecOK} defaults to C{False} and can be overridden in the constructor. """ self._verifyConstructorFlag('dnssecOK', defaultVal=False) def test_authenticData(self): """ L{dns._EDNSMessage.authenticData} defaults to C{False} and can be overridden in the constructor. 
""" self._verifyConstructorFlag('authenticData', defaultVal=False) def test_checkingDisabled(self): """ L{dns._EDNSMessage.checkingDisabled} defaults to C{False} and can be overridden in the constructor. """ self._verifyConstructorFlag('checkingDisabled', defaultVal=False) def test_queriesOverride(self): """ L{dns._EDNSMessage.queries} can be overridden in the constructor. """ msg = self.messageFactory(queries=[dns.Query(b'example.com')]) self.assertEqual( msg.queries, [dns.Query(b'example.com')]) def test_answersOverride(self): """ L{dns._EDNSMessage.answers} can be overridden in the constructor. """ msg = self.messageFactory( answers=[ dns.RRHeader( b'example.com', payload=dns.Record_A('1.2.3.4'))]) self.assertEqual( msg.answers, [dns.RRHeader(b'example.com', payload=dns.Record_A('1.2.3.4'))]) def test_authorityOverride(self): """ L{dns._EDNSMessage.authority} can be overridden in the constructor. """ msg = self.messageFactory( authority=[ dns.RRHeader( b'example.com', type=dns.SOA, payload=dns.Record_SOA())]) self.assertEqual( msg.authority, [dns.RRHeader(b'example.com', type=dns.SOA, payload=dns.Record_SOA())]) def test_additionalOverride(self): """ L{dns._EDNSMessage.authority} can be overridden in the constructor. """ msg = self.messageFactory( additional=[ dns.RRHeader( b'example.com', payload=dns.Record_A('1.2.3.4'))]) self.assertEqual( msg.additional, [dns.RRHeader(b'example.com', payload=dns.Record_A('1.2.3.4'))]) def test_reprDefaults(self): """ L{dns._EDNSMessage.__repr__} omits field values and sections which are identical to their defaults. The id field value is always shown. """ self.assertEqual( '<_EDNSMessage id=0>', repr(self.messageFactory()) ) def test_reprFlagsIfSet(self): """ L{dns._EDNSMessage.__repr__} displays flags if they are L{True}. """ m = self.messageFactory(answer=True, auth=True, trunc=True, recDes=True, recAv=True, authenticData=True, checkingDisabled=True, dnssecOK=True) self.assertEqual( '<_EDNSMessage ' 'id=0 ' 'flags=answer,auth,trunc,recDes,recAv,authenticData,' 'checkingDisabled,dnssecOK' '>', repr(m), ) def test_reprNonDefautFields(self): """ L{dns._EDNSMessage.__repr__} displays field values if they differ from their defaults. """ m = self.messageFactory(id=10, opCode=20, rCode=30, maxSize=40, ednsVersion=50) self.assertEqual( '<_EDNSMessage ' 'id=10 ' 'opCode=20 ' 'rCode=30 ' 'maxSize=40 ' 'ednsVersion=50' '>', repr(m), ) def test_reprNonDefaultSections(self): """ L{dns.Message.__repr__} displays sections which differ from their defaults. """ m = self.messageFactory() m.queries = [1, 2, 3] m.answers = [4, 5, 6] m.authority = [7, 8, 9] m.additional = [10, 11, 12] self.assertEqual( '<_EDNSMessage ' 'id=0 ' 'queries=[1, 2, 3] ' 'answers=[4, 5, 6] ' 'authority=[7, 8, 9] ' 'additional=[10, 11, 12]' '>', repr(m), ) def test_fromStrCallsMessageFactory(self): """ L{dns._EDNSMessage.fromString} calls L{dns._EDNSMessage._messageFactory} to create a new L{dns.Message} instance which is used to decode the supplied bytes. """ class FakeMessageFactory(object): """ Fake message factory. """ def fromStr(self, *args, **kwargs): """ Fake fromStr method which raises the arguments it was passed. 
                @param args: positional arguments
                @param kwargs: keyword arguments
                """
                raise RaisedArgs(args, kwargs)

        m = dns._EDNSMessage()
        m._messageFactory = FakeMessageFactory
        dummyBytes = object()
        e = self.assertRaises(RaisedArgs, m.fromStr, dummyBytes)
        self.assertEqual(
            ((dummyBytes,), {}),
            (e.args, e.kwargs)
        )

    def test_fromStrCallsFromMessage(self):
        """
        L{dns._EDNSMessage.fromString} calls L{dns._EDNSMessage._fromMessage}
        with a L{dns.Message} instance.
        """
        m = dns._EDNSMessage()

        class FakeMessageFactory(object):
            """
            Fake message factory.
            """
            def fromStr(self, bytes):
                """
                A noop fake version of fromStr.

                @param bytes: the bytes to be decoded
                """

        fakeMessage = FakeMessageFactory()
        m._messageFactory = lambda: fakeMessage

        def fakeFromMessage(*args, **kwargs):
            raise RaisedArgs(args, kwargs)
        m._fromMessage = fakeFromMessage
        e = self.assertRaises(RaisedArgs, m.fromStr, b'')
        self.assertEqual(
            ((fakeMessage,), {}),
            (e.args, e.kwargs)
        )

    def test_toStrCallsToMessage(self):
        """
        L{dns._EDNSMessage.toStr} calls L{dns._EDNSMessage._toMessage}
        """
        m = dns._EDNSMessage()

        def fakeToMessage(*args, **kwargs):
            raise RaisedArgs(args, kwargs)
        m._toMessage = fakeToMessage
        e = self.assertRaises(RaisedArgs, m.toStr)
        self.assertEqual(
            ((), {}),
            (e.args, e.kwargs)
        )

    def test_toStrCallsToMessageToStr(self):
        """
        L{dns._EDNSMessage.toStr} calls C{toStr} on the message returned by
        L{dns._EDNSMessage._toMessage}.
        """
        m = dns._EDNSMessage()
        dummyBytes = object()

        class FakeMessage(object):
            """
            Fake Message
            """
            def toStr(self):
                """
                Fake toStr which returns dummyBytes.

                @return: dummyBytes
                """
                return dummyBytes

        def fakeToMessage(*args, **kwargs):
            return FakeMessage()
        m._toMessage = fakeToMessage

        self.assertEqual(
            dummyBytes,
            m.toStr()
        )


class EDNSMessageEqualityTests(ComparisonTestsMixin,
                               unittest.SynchronousTestCase):
    """
    Tests for equality between L{dns._EDNSMessage} instances.

    These tests will not work with L{dns.Message} because it does not use
    L{twisted.python.util.FancyEqMixin}.
    """
    messageFactory = dns._EDNSMessage

    def test_id(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        id.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(id=1),
            self.messageFactory(id=1),
            self.messageFactory(id=2),
        )

    def test_answer(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        answer flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(answer=True),
            self.messageFactory(answer=True),
            self.messageFactory(answer=False),
        )

    def test_opCode(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        opCode.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(opCode=dns.OP_STATUS),
            self.messageFactory(opCode=dns.OP_STATUS),
            self.messageFactory(opCode=dns.OP_INVERSE),
        )

    def test_auth(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        auth flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(auth=True),
            self.messageFactory(auth=True),
            self.messageFactory(auth=False),
        )

    def test_trunc(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        trunc flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(trunc=True),
            self.messageFactory(trunc=True),
            self.messageFactory(trunc=False),
        )

    def test_recDes(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        recDes flag.
""" self.assertNormalEqualityImplementation( self.messageFactory(recDes=True), self.messageFactory(recDes=True), self.messageFactory(recDes=False), ) def test_recAv(self): """ Two L{dns._EDNSMessage} instances compare equal if they have the same recAv flag. """ self.assertNormalEqualityImplementation( self.messageFactory(recAv=True), self.messageFactory(recAv=True), self.messageFactory(recAv=False), ) def test_rCode(self): """ Two L{dns._EDNSMessage} instances compare equal if they have the same rCode. """ self.assertNormalEqualityImplementation( self.messageFactory(rCode=16), self.messageFactory(rCode=16), self.messageFactory(rCode=15), ) def test_ednsVersion(self): """ Two L{dns._EDNSMessage} instances compare equal if they have the same ednsVersion. """ self.assertNormalEqualityImplementation( self.messageFactory(ednsVersion=1), self.messageFactory(ednsVersion=1), self.messageFactory(ednsVersion=None), ) def test_dnssecOK(self): """ Two L{dns._EDNSMessage} instances compare equal if they have the same dnssecOK. """ self.assertNormalEqualityImplementation( self.messageFactory(dnssecOK=True), self.messageFactory(dnssecOK=True), self.messageFactory(dnssecOK=False), ) def test_authenticData(self): """ Two L{dns._EDNSMessage} instances compare equal if they have the same authenticData flags. """ self.assertNormalEqualityImplementation( self.messageFactory(authenticData=True), self.messageFactory(authenticData=True), self.messageFactory(authenticData=False), ) def test_checkingDisabled(self): """ Two L{dns._EDNSMessage} instances compare equal if they have the same checkingDisabled flags. """ self.assertNormalEqualityImplementation( self.messageFactory(checkingDisabled=True), self.messageFactory(checkingDisabled=True), self.messageFactory(checkingDisabled=False), ) def test_maxSize(self): """ Two L{dns._EDNSMessage} instances compare equal if they have the same maxSize. """ self.assertNormalEqualityImplementation( self.messageFactory(maxSize=2048), self.messageFactory(maxSize=2048), self.messageFactory(maxSize=1024), ) def test_queries(self): """ Two L{dns._EDNSMessage} instances compare equal if they have the same queries. """ self.assertNormalEqualityImplementation( self.messageFactory(queries=[dns.Query(b'example.com')]), self.messageFactory(queries=[dns.Query(b'example.com')]), self.messageFactory(queries=[dns.Query(b'example.org')]), ) def test_answers(self): """ Two L{dns._EDNSMessage} instances compare equal if they have the same answers. """ self.assertNormalEqualityImplementation( self.messageFactory(answers=[dns.RRHeader( b'example.com', payload=dns.Record_A('1.2.3.4'))]), self.messageFactory(answers=[dns.RRHeader( b'example.com', payload=dns.Record_A('1.2.3.4'))]), self.messageFactory(answers=[dns.RRHeader( b'example.org', payload=dns.Record_A('4.3.2.1'))]), ) def test_authority(self): """ Two L{dns._EDNSMessage} instances compare equal if they have the same authority records. """ self.assertNormalEqualityImplementation( self.messageFactory(authority=[dns.RRHeader( b'example.com', type=dns.SOA, payload=dns.Record_SOA())]), self.messageFactory(authority=[dns.RRHeader( b'example.com', type=dns.SOA, payload=dns.Record_SOA())]), self.messageFactory(authority=[dns.RRHeader( b'example.org', type=dns.SOA, payload=dns.Record_SOA())]), ) def test_additional(self): """ Two L{dns._EDNSMessage} instances compare equal if they have the same additional records. 
""" self.assertNormalEqualityImplementation( self.messageFactory(additional=[dns.RRHeader( b'example.com', payload=dns.Record_A('1.2.3.4'))]), self.messageFactory(additional=[dns.RRHeader( b'example.com', payload=dns.Record_A('1.2.3.4'))]), self.messageFactory(additional=[dns.RRHeader( b'example.org', payload=dns.Record_A('1.2.3.4'))]), ) class StandardEncodingTestsMixin(object): """ Tests for the encoding and decoding of various standard (not EDNS) messages. These tests should work with both L{dns._EDNSMessage} and L{dns.Message}. TestCase classes that use this mixin must provide a C{messageFactory} method which accepts any argment supported by L{dns._EDNSMessage.__init__}. EDNS specific arguments may be discarded if not supported by the message class under construction. """ def test_emptyMessageEncode(self): """ An empty message can be encoded. """ self.assertEqual( self.messageFactory(**MessageEmpty.kwargs()).toStr(), MessageEmpty.bytes()) def test_emptyMessageDecode(self): """ An empty message byte sequence can be decoded. """ m = self.messageFactory() m.fromStr(MessageEmpty.bytes()) self.assertEqual(m, self.messageFactory(**MessageEmpty.kwargs())) def test_completeQueryEncode(self): """ A fully populated query message can be encoded. """ self.assertEqual( self.messageFactory(**MessageComplete.kwargs()).toStr(), MessageComplete.bytes()) def test_completeQueryDecode(self): """ A fully populated message byte string can be decoded. """ m = self.messageFactory() m.fromStr(MessageComplete.bytes()), self.assertEqual(m, self.messageFactory(**MessageComplete.kwargs())) def test_NULL(self): """ A I{NULL} record with an arbitrary payload can be encoded and decoded as part of a message. """ bytes = b''.join([dns._ord2bytes(i) for i in range(256)]) rec = dns.Record_NULL(bytes) rr = dns.RRHeader(b'testname', dns.NULL, payload=rec) msg1 = self.messageFactory() msg1.answers.append(rr) s = msg1.toStr() msg2 = self.messageFactory() msg2.fromStr(s) self.assertIsInstance(msg2.answers[0].payload, dns.Record_NULL) self.assertEqual(msg2.answers[0].payload.payload, bytes) def test_nonAuthoritativeMessageEncode(self): """ If the message C{authoritative} attribute is set to 0, the encoded bytes will have AA bit 0. """ self.assertEqual( self.messageFactory(**MessageNonAuthoritative.kwargs()).toStr(), MessageNonAuthoritative.bytes()) def test_nonAuthoritativeMessageDecode(self): """ The L{dns.RRHeader} instances created by a message from a non-authoritative message byte string are marked as not authoritative. """ m = self.messageFactory() m.fromStr(MessageNonAuthoritative.bytes()) self.assertEqual( m, self.messageFactory(**MessageNonAuthoritative.kwargs())) def test_authoritativeMessageEncode(self): """ If the message C{authoritative} attribute is set to 1, the encoded bytes will have AA bit 1. """ self.assertEqual( self.messageFactory(**MessageAuthoritative.kwargs()).toStr(), MessageAuthoritative.bytes()) def test_authoritativeMessageDecode(self): """ The message and its L{dns.RRHeader} instances created by C{decode} from an authoritative message byte string, are marked as authoritative. """ m = self.messageFactory() m.fromStr(MessageAuthoritative.bytes()) self.assertEqual( m, self.messageFactory(**MessageAuthoritative.kwargs())) def test_truncatedMessageEncode(self): """ If the message C{trunc} attribute is set to 1 the encoded bytes will have TR bit 1. 
""" self.assertEqual( self.messageFactory(**MessageTruncated.kwargs()).toStr(), MessageTruncated.bytes()) def test_truncatedMessageDecode(self): """ The message instance created by decoding a truncated message is marked as truncated. """ m = self.messageFactory() m.fromStr(MessageTruncated.bytes()) self.assertEqual(m, self.messageFactory(**MessageTruncated.kwargs())) class EDNSMessageStandardEncodingTests(StandardEncodingTestsMixin, unittest.SynchronousTestCase): """ Tests for the encoding and decoding of various standard (non-EDNS) messages by L{dns._EDNSMessage}. """ messageFactory = dns._EDNSMessage class MessageStandardEncodingTests(StandardEncodingTestsMixin, unittest.SynchronousTestCase): """ Tests for the encoding and decoding of various standard (non-EDNS) messages by L{dns.Message}. """ @staticmethod def messageFactory(**kwargs): """ This function adapts constructor arguments expected by _EDNSMessage.__init__ to arguments suitable for use with the Message.__init__. Also handles the fact that unlike L{dns._EDNSMessage}, L{dns.Message.__init__} does not accept queries, answers etc as arguments. Also removes any L{dns._EDNSMessage} specific arguments. @param args: The positional arguments which will be passed to L{dns.Message.__init__}. @param kwargs: The keyword arguments which will be stripped of EDNS specific arguments before being passed to L{dns.Message.__init__}. @return: An L{dns.Message} instance. """ queries = kwargs.pop('queries', []) answers = kwargs.pop('answers', []) authority = kwargs.pop('authority', []) additional = kwargs.pop('additional', []) kwargs.pop('ednsVersion', None) m = dns.Message(**kwargs) m.queries = queries m.answers = answers m.authority = authority m.additional = additional return MessageComparable(m) class EDNSMessageEDNSEncodingTests(unittest.SynchronousTestCase): """ Tests for the encoding and decoding of various EDNS messages. These test will not work with L{dns.Message}. """ messageFactory = dns._EDNSMessage def test_ednsMessageDecodeStripsOptRecords(self): """ The L(_EDNSMessage} instance created by L{dns._EDNSMessage.decode} from an EDNS query never includes OPT records in the additional section. """ m = self.messageFactory() m.fromStr(MessageEDNSQuery.bytes()) self.assertEqual(m.additional, []) def test_ednsMessageDecodeMultipleOptRecords(self): """ An L(_EDNSMessage} instance created from a byte string containing multiple I{OPT} records will discard all the C{OPT} records. C{ednsVersion} will be set to L{None}. @see: U{https://tools.ietf.org/html/rfc6891#section-6.1.1} """ m = dns.Message() m.additional = [ dns._OPTHeader(version=2), dns._OPTHeader(version=3)] ednsMessage = dns._EDNSMessage() ednsMessage.fromStr(m.toStr()) self.assertIsNone(ednsMessage.ednsVersion) def test_fromMessageCopiesSections(self): """ L{dns._EDNSMessage._fromMessage} returns an L{_EDNSMessage} instance whose queries, answers, authority and additional lists are copies (not references to) the original message lists. 
""" standardMessage = dns.Message() standardMessage.fromStr(MessageEDNSQuery.bytes()) ednsMessage = dns._EDNSMessage._fromMessage(standardMessage) duplicates = [] for attrName in ('queries', 'answers', 'authority', 'additional'): if (getattr(standardMessage, attrName) is getattr(ednsMessage, attrName)): duplicates.append(attrName) if duplicates: self.fail( 'Message and _EDNSMessage shared references to the following ' 'section lists after decoding: %s' % (duplicates,)) def test_toMessageCopiesSections(self): """ L{dns._EDNSMessage.toStr} makes no in place changes to the message instance. """ ednsMessage = dns._EDNSMessage(ednsVersion=1) ednsMessage.toStr() self.assertEqual(ednsMessage.additional, []) def test_optHeaderPosition(self): """ L{dns._EDNSMessage} can decode OPT records, regardless of their position in the additional records section. "The OPT RR MAY be placed anywhere within the additional data section." @see: U{https://tools.ietf.org/html/rfc6891#section-6.1.1} """ # XXX: We need an _OPTHeader.toRRHeader method. See #6779. b = BytesIO() optRecord = dns._OPTHeader(version=1) optRecord.encode(b) optRRHeader = dns.RRHeader() b.seek(0) optRRHeader.decode(b) m = dns.Message() m.additional = [optRRHeader] actualMessages = [] actualMessages.append(dns._EDNSMessage._fromMessage(m).ednsVersion) m.additional.append(dns.RRHeader(type=dns.A)) actualMessages.append( dns._EDNSMessage._fromMessage(m).ednsVersion) m.additional.insert(0, dns.RRHeader(type=dns.A)) actualMessages.append( dns._EDNSMessage._fromMessage(m).ednsVersion) self.assertEqual( [1] * 3, actualMessages ) def test_ednsDecode(self): """ The L(_EDNSMessage} instance created by L{dns._EDNSMessage.fromStr} derives its edns specific values (C{ednsVersion}, etc) from the supplied OPT record. """ m = self.messageFactory() m.fromStr(MessageEDNSComplete.bytes()) self.assertEqual(m, self.messageFactory(**MessageEDNSComplete.kwargs())) def test_ednsEncode(self): """ The L(_EDNSMessage} instance created by L{dns._EDNSMessage.toStr} encodes its edns specific values (C{ednsVersion}, etc) into an OPT record added to the additional section. """ self.assertEqual( self.messageFactory(**MessageEDNSComplete.kwargs()).toStr(), MessageEDNSComplete.bytes()) def test_extendedRcodeEncode(self): """ The L(_EDNSMessage.toStr} encodes the extended I{RCODE} (>=16) by assigning the lower 4bits to the message RCODE field and the upper 4bits to the OPT pseudo record. """ self.assertEqual( self.messageFactory(**MessageEDNSExtendedRCODE.kwargs()).toStr(), MessageEDNSExtendedRCODE.bytes()) def test_extendedRcodeDecode(self): """ The L(_EDNSMessage} instance created by L{dns._EDNSMessage.fromStr} derives RCODE from the supplied OPT record. """ m = self.messageFactory() m.fromStr(MessageEDNSExtendedRCODE.bytes()) self.assertEqual( m, self.messageFactory(**MessageEDNSExtendedRCODE.kwargs())) def test_extendedRcodeZero(self): """ Note that EXTENDED-RCODE value 0 indicates that an unextended RCODE is in use (values 0 through 15). https://tools.ietf.org/html/rfc6891#section-6.1.3 """ ednsMessage = self.messageFactory(rCode=15, ednsVersion=0) standardMessage = ednsMessage._toMessage() self.assertEqual( (15, 0), (standardMessage.rCode, standardMessage.additional[0].extendedRCODE) ) class ResponseFromMessageTests(unittest.SynchronousTestCase): """ Tests for L{dns._responseFromMessage}. 
""" def test_responseFromMessageResponseType(self): """ L{dns.Message._responseFromMessage} is a constructor function which generates a new I{answer} message from an existing L{dns.Message} like instance. """ request = dns.Message() response = dns._responseFromMessage(responseConstructor=dns.Message, message=request) self.assertIsNot(request, response) def test_responseType(self): """ L{dns._responseFromMessage} returns a new instance of C{cls} """ class SuppliedClass(object): id = 1 queries = [] expectedClass = dns.Message self.assertIsInstance( dns._responseFromMessage(responseConstructor=expectedClass, message=SuppliedClass()), expectedClass ) def test_responseId(self): """ L{dns._responseFromMessage} copies the C{id} attribute of the original message. """ self.assertEqual( 1234, dns._responseFromMessage(responseConstructor=dns.Message, message=dns.Message(id=1234)).id ) def test_responseAnswer(self): """ L{dns._responseFromMessage} sets the C{answer} flag to L{True} """ request = dns.Message() response = dns._responseFromMessage(responseConstructor=dns.Message, message=request) self.assertEqual( (False, True), (request.answer, response.answer) ) def test_responseQueries(self): """ L{dns._responseFromMessage} copies the C{queries} attribute of the original message. """ request = dns.Message() expectedQueries = [object(), object(), object()] request.queries = expectedQueries[:] self.assertEqual( expectedQueries, dns._responseFromMessage(responseConstructor=dns.Message, message=request).queries ) def test_responseKwargs(self): """ L{dns._responseFromMessage} accepts other C{kwargs} which are assigned to the new message before it is returned. """ self.assertEqual( 123, dns._responseFromMessage( responseConstructor=dns.Message, message=dns.Message(), rCode=123).rCode ) class Foo(object): """ An example class for use in L{dns._compactRepr} tests. It follows the pattern of initialiser settable flags, fields and sections found in L{dns.Message} and L{dns._EDNSMessage}. """ def __init__(self, field1=1, field2=2, alwaysShowField='AS', flagTrue=True, flagFalse=False, section1=None): """ Set some flags, fields and sections as public attributes. """ self.field1 = field1 self.field2 = field2 self.alwaysShowField = alwaysShowField self.flagTrue = flagTrue self.flagFalse = flagFalse if section1 is None: section1 = [] self.section1 = section1 def __repr__(self): """ Call L{dns._compactRepr} to generate a string representation. """ return dns._compactRepr( self, alwaysShow='alwaysShowField'.split(), fieldNames='field1 field2 alwaysShowField'.split(), flagNames='flagTrue flagFalse'.split(), sectionNames='section1 section2'.split() ) class CompactReprTests(unittest.SynchronousTestCase): """ Tests for L[dns._compactRepr}. """ messageFactory = Foo def test_defaults(self): """ L{dns._compactRepr} omits field values and sections which have the default value. Flags which are True are always shown. """ self.assertEqual( "<Foo alwaysShowField='AS' flags=flagTrue>", repr(self.messageFactory()) ) def test_flagsIfSet(self): """ L{dns._compactRepr} displays flags if they have a non-default value. """ m = self.messageFactory(flagTrue=True, flagFalse=True) self.assertEqual( '<Foo ' "alwaysShowField='AS' " 'flags=flagTrue,flagFalse' '>', repr(m), ) def test_nonDefautFields(self): """ L{dns._compactRepr} displays field values if they differ from their defaults. 
""" m = self.messageFactory(field1=10, field2=20) self.assertEqual( '<Foo ' 'field1=10 ' 'field2=20 ' "alwaysShowField='AS' " 'flags=flagTrue' '>', repr(m), ) def test_nonDefaultSections(self): """ L{dns._compactRepr} displays sections which differ from their defaults. """ m = self.messageFactory() m.section1 = [1, 1, 1] m.section2 = [2, 2, 2] self.assertEqual( '<Foo ' "alwaysShowField='AS' " 'flags=flagTrue ' 'section1=[1, 1, 1] ' 'section2=[2, 2, 2]' '>', repr(m), )<|fim▁end|>
b'\x00\x07' # RDLEN 7 Too long - should be 6 b'\x00\x01' # OPTION-CODE
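
Editorial aside on the fixtures above: the third and fourth DNS header bytes are hand-assembled in the test data (for example b'\x95' and b'\x8f' in MessageComplete, and b'\xbf' in MessageEDNSComplete, where the AD and CD bits occupy the old Z field). A minimal, illustrative Python sketch of that packing — this helper is not part of the test module:

# Illustrative only: packs the two DNS header flag bytes per RFC 1035
# section 4.1.1; `z` carries the AD/CD bits used by the EDNS fixtures.
def pack_flag_bytes(qr, opcode, aa, tc, rd, ra, z, rcode):
    byte3 = (qr << 7) | (opcode << 3) | (aa << 2) | (tc << 1) | rd
    byte4 = (ra << 7) | ((z & 0x7) << 4) | (rcode & 0xf)
    return bytes(bytearray([byte3, byte4]))

# MessageComplete: QR=1, OPCODE=2 (STATUS), AA=1, TC=0, RD=1, RA=1, RCODE=15
assert pack_flag_bytes(1, 2, 1, 0, 1, 1, 0, 15) == b'\x95\x8f'
# MessageEDNSComplete additionally sets AD and CD inside the Z field
assert pack_flag_bytes(1, 2, 1, 0, 1, 1, 0b11, 15) == b'\x95\xbf'
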
<|file_name|>array_tokenizer.hpp<|end_file_name|><|fim▁begin|>/*
 * array_tokenizer.hpp
 *
 *  Created on: Sep 28, 2015
 *      Author: zmij
 */

#ifndef LIB_PG_ASYNC_INCLUDE_TIP_DB_PG_DETAIL_ARRAY_TOKENIZER_HPP_
#define LIB_PG_ASYNC_INCLUDE_TIP_DB_PG_DETAIL_ARRAY_TOKENIZER_HPP_

#include <tip/db/pg/detail/tokenizer_base.hpp>

namespace tip {
namespace db {
namespace pg {
namespace io {
namespace detail {

template < typename InputIterator >
class array_tokenizer {
public:
    typedef InputIterator iterator_type;
    typedef tokenizer_base< InputIterator, '{', '}' > tokenizer_type;
public:
    template< typename OutputIterator >
    array_tokenizer(iterator_type& begin, iterator_type end, OutputIterator out)
    {
        tokenizer_type(begin, end, out);
    }
};

} /* namespace detail */
} // namespace io<|fim▁hole|>} /* namespace db */
} /* namespace tip */

#endif /* LIB_PG_ASYNC_INCLUDE_TIP_DB_PG_DETAIL_ARRAY_TOKENIZER_HPP_ */<|fim▁end|>
} /* namespace pg */
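
The tokenizer above walks a '{'/'}'-delimited PostgreSQL array literal and emits its elements through an output iterator; quoting and nesting are handled by tokenizer_base, which is not shown. As a rough, illustrative Python analogue of the flat, unquoted case only (not the library's API):

# Sketch of tokenizing a flat, unquoted PostgreSQL array literal.
# Real input may contain quoted elements and nested arrays, which this ignores.
def array_tokens(literal):
    if not (literal.startswith('{') and literal.endswith('}')):
        raise ValueError('expected a brace-delimited array literal')
    body = literal[1:-1]
    return body.split(',') if body else []

assert array_tokens('{1,2,3}') == ['1', '2', '3']
assert array_tokens('{}') == []
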
<|file_name|>InfinispanTicketRegistry.java<|end_file_name|><|fim▁begin|>package org.apereo.cas.ticket.registry;

import org.apereo.cas.ticket.Ticket;
import org.infinispan.Cache;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Collection;
import java.util.concurrent.TimeUnit;

/**
 * This is {@link InfinispanTicketRegistry}. Infinispan is a distributed in-memory
 * key/value data store with optional schema.
 * It offers advanced functionality such as transactions, events, querying and distributed processing.
 * See <a href="http://infinispan.org/features/">http://infinispan.org/features/</a> for more info.
 *
 * @author Misagh Moayyed
 * @since 4.2.0
 */
public class InfinispanTicketRegistry extends AbstractTicketRegistry {
    private static final Logger LOGGER = LoggerFactory.getLogger(InfinispanTicketRegistry.class);

    private Cache<String, Ticket> cache;

    /**
     * Instantiates a new Infinispan ticket registry.
     *
     * @param cache the cache
     */
    public InfinispanTicketRegistry(final Cache<String, Ticket> cache) {
        this.cache = cache;
        LOGGER.info("Setting up Infinispan Ticket Registry...");
    }

    @Override
    public Ticket updateTicket(final Ticket ticket) {<|fim▁hole|>
    @Override
    public void addTicket(final Ticket ticketToAdd) {
        final Ticket ticket = encodeTicket(ticketToAdd);

        final long idleTime = ticket.getExpirationPolicy().getTimeToIdle() <= 0
                ? ticket.getExpirationPolicy().getTimeToLive()
                : ticket.getExpirationPolicy().getTimeToIdle();

        LOGGER.debug("Adding ticket [{}] to cache store to live [{}] seconds and stay idle for [{}]",
                ticket.getId(), ticket.getExpirationPolicy().getTimeToLive(), idleTime);

        this.cache.put(ticket.getId(), ticket,
                ticket.getExpirationPolicy().getTimeToLive(), TimeUnit.SECONDS,
                idleTime, TimeUnit.SECONDS);
    }

    @Override
    public Ticket getTicket(final String ticketId) {
        // Check for null before encoding; the encoded id is only needed for the lookup.
        if (ticketId == null) {
            return null;
        }
        final String encTicketId = encodeTicketId(ticketId);
        return Ticket.class.cast(cache.get(encTicketId));
    }

    @Override
    public boolean deleteSingleTicket(final String ticketId) {
        this.cache.remove(ticketId);
        return getTicket(ticketId) == null;
    }

    @Override
    public long deleteAll() {
        final int size = this.cache.size();
        this.cache.clear();
        return size;
    }

    /**
     * Retrieve all tickets from the registry.
     * <p>
     * Note! Usage of this method can be computationally and I/O intensive and should not be used for other than
     * debugging.
     *
     * @return collection of tickets currently stored in the registry. Tickets
     * might or might not be valid i.e. expired.
     */
    @Override
    public Collection<Ticket> getTickets() {
        return decodeTickets(this.cache.values());
    }
}<|fim▁end|>
this.cache.put(ticket.getId(), ticket); return ticket; }
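
addTicket above derives the cache entry's idle timeout from the ticket's expiration policy, falling back to the time-to-live when no positive idle time is configured. The decision reduces to the following illustrative sketch (not Infinispan API):

# Sketch of the lifetime fallback used in addTicket: a non-positive
# time-to-idle means "use the time-to-live as the idle timeout too".
def cache_lifetimes(time_to_live, time_to_idle):
    idle = time_to_live if time_to_idle <= 0 else time_to_idle
    return (time_to_live, idle)

assert cache_lifetimes(300, 0) == (300, 300)   # no idle timeout configured
assert cache_lifetimes(300, 60) == (300, 60)   # explicit idle timeout wins
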
<|file_name|>comp-1882.component.ts<|end_file_name|><|fim▁begin|>/** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */<|fim▁hole|> selector: 'app-comp-1882', templateUrl: './comp-1882.component.html', styleUrls: ['./comp-1882.component.css'] }) export class Comp1882Component implements OnInit { constructor() { } ngOnInit() { } }<|fim▁end|>
import { Component, OnInit } from '@angular/core'; @Component({
<|file_name|>keychain.rs<|end_file_name|><|fim▁begin|>use std::collections::HashMap; use super::operations::BurnchainOpSigner; use stacks::chainstate::stacks::{StacksTransactionSigner, TransactionAuth, StacksPublicKey, StacksPrivateKey, StacksAddress}; use stacks::address::AddressHashMode; use stacks::burnchains::{BurnchainSigner, PrivateKey}; use stacks::util::vrf::{VRF, VRFProof, VRFPublicKey, VRFPrivateKey}; use stacks::util::hash::{Sha256Sum}; #[derive(Clone)] pub struct Keychain { secret_keys: Vec<StacksPrivateKey>, threshold: u16, hash_mode: AddressHashMode, pub hashed_secret_state: Sha256Sum, microblocks_secret_keys: Vec<StacksPrivateKey>, vrf_secret_keys: Vec<VRFPrivateKey>, vrf_map: HashMap<VRFPublicKey, VRFPrivateKey>, } impl Keychain { pub fn new(secret_keys: Vec<StacksPrivateKey>, threshold: u16, hash_mode: AddressHashMode) -> Keychain { // Compute hashed secret state let hashed_secret_state = { let mut buf : Vec<u8> = secret_keys.iter() .flat_map(|sk| sk.to_bytes()) .collect(); buf.extend_from_slice(&[(threshold >> 8) as u8, (threshold & 0xff) as u8, hash_mode as u8]); Sha256Sum::from_data(&buf[..]) }; Self { hash_mode, hashed_secret_state, microblocks_secret_keys: vec![], secret_keys, threshold, vrf_secret_keys: vec![], vrf_map: HashMap::new(), } } pub fn default(seed: Vec<u8>) -> Keychain { let mut re_hashed_seed = seed; let secret_key = loop { match StacksPrivateKey::from_slice(&re_hashed_seed[..]) { Ok(sk) => break sk, Err(_) => re_hashed_seed = Sha256Sum::from_data(&re_hashed_seed[..]).as_bytes().to_vec() } }; let threshold = 1; let hash_mode = AddressHashMode::SerializeP2PKH; Keychain::new(vec![secret_key], threshold, hash_mode) } pub fn rotate_vrf_keypair(&mut self, block_height: u64) -> VRFPublicKey { let mut seed = { let mut secret_state = self.hashed_secret_state.to_bytes().to_vec(); secret_state.extend_from_slice(&block_height.to_be_bytes()[..]); Sha256Sum::from_data(&secret_state) }; // Not every 256-bit number is a valid Ed25519 secret key. // As such, we continuously generate seeds through re-hashing until one works. let sk = loop { match VRFPrivateKey::from_bytes(seed.as_bytes()) { Some(sk) => break sk, None => seed = Sha256Sum::from_data(seed.as_bytes()) } }; let pk = VRFPublicKey::from_private(&sk); <|fim▁hole|> pk } pub fn rotate_microblock_keypair(&mut self) -> StacksPrivateKey { let mut seed = match self.microblocks_secret_keys.last() { // First key is the hash of the secret state None => self.hashed_secret_state, // Next key is the hash of the last Some(last_sk) => Sha256Sum::from_data(&last_sk.to_bytes()[..]), }; // Not every 256-bit number is a valid secp256k1 secret key. // As such, we continuously generate seeds through re-hashing until one works. 
let mut sk = loop { match StacksPrivateKey::from_slice(&seed.to_bytes()[..]) { Ok(sk) => break sk, Err(_) => seed = Sha256Sum::from_data(seed.as_bytes()) } }; sk.set_compress_public(true); self.microblocks_secret_keys.push(sk.clone()); sk } pub fn get_microblock_key(&self) -> Option<StacksPrivateKey> { self.microblocks_secret_keys.last().cloned() } pub fn sign_as_origin(&self, tx_signer: &mut StacksTransactionSigner) -> () { let num_keys = if self.secret_keys.len() < self.threshold as usize { self.secret_keys.len() } else { self.threshold as usize }; for i in 0..num_keys { tx_signer.sign_origin(&self.secret_keys[i]).unwrap(); } } /// Given a VRF public key, generates a VRF Proof pub fn generate_proof(&self, vrf_pk: &VRFPublicKey, bytes: &[u8; 32]) -> Option<VRFProof> { // Retrieve the corresponding VRF secret key let vrf_sk = match self.vrf_map.get(vrf_pk) { Some(vrf_pk) => vrf_pk, None => return None }; // Generate the proof let proof = VRF::prove(&vrf_sk, &bytes.to_vec()); // Ensure that the proof is valid by verifying let is_valid = match VRF::verify(vrf_pk, &proof, &bytes.to_vec()) { Ok(v) => v, Err(_) => false }; assert!(is_valid); Some(proof) } /// Given the keychain's secret keys, computes and returns the corresponding Stack address. /// Note: Testnet bit is hardcoded. pub fn get_address(&self) -> StacksAddress { let public_keys = self.secret_keys.iter().map(|ref pk| StacksPublicKey::from_private(pk)).collect(); StacksAddress::from_public_keys( self.hash_mode.to_version_testnet(), &self.hash_mode, self.threshold as usize, &public_keys).unwrap() } pub fn address_from_burnchain_signer(signer: &BurnchainSigner) -> StacksAddress { StacksAddress::from_public_keys( signer.hash_mode.to_version_testnet(), &signer.hash_mode, signer.num_sigs, &signer.public_keys).unwrap() } pub fn get_burnchain_signer(&self) -> BurnchainSigner { let public_keys = self.secret_keys.iter().map(|ref pk| StacksPublicKey::from_private(pk)).collect(); BurnchainSigner { hash_mode: self.hash_mode, num_sigs: self.threshold as usize, public_keys } } pub fn get_transaction_auth(&self) -> Option<TransactionAuth> { match self.hash_mode { AddressHashMode::SerializeP2PKH => TransactionAuth::from_p2pkh(&self.secret_keys[0]), AddressHashMode::SerializeP2SH => TransactionAuth::from_p2sh(&self.secret_keys, self.threshold), AddressHashMode::SerializeP2WPKH => TransactionAuth::from_p2wpkh(&self.secret_keys[0]), AddressHashMode::SerializeP2WSH => TransactionAuth::from_p2wsh(&self.secret_keys, self.threshold), } } pub fn origin_address(&self) -> Option<StacksAddress> { match self.get_transaction_auth() { // Note: testnet hard-coded Some(auth) => Some(auth.origin().address_testnet()), None => None } } pub fn generate_op_signer(&self) -> BurnchainOpSigner { BurnchainOpSigner::new(self.secret_keys[0], false) } }<|fim▁end|>
self.vrf_secret_keys.push(sk.clone()); self.vrf_map.insert(pk.clone(), sk);
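
rotate_vrf_keypair and rotate_microblock_keypair above share one pattern: derive a seed by hashing the secret state (plus the big-endian block height for VRF keys), then re-hash until the bytes form a valid key, since not every 256-bit value is a valid Ed25519 or secp256k1 secret key. A Python sketch of that loop; the validity predicate is a stand-in for VRFPrivateKey::from_bytes or StacksPrivateKey::from_slice succeeding:

import hashlib
import struct

# Illustrative rehash-until-valid key derivation; is_valid_key is a dummy
# stand-in for the real "can these bytes be parsed as a private key?" check.
def derive_key(secret_state, block_height, is_valid_key):
    seed = hashlib.sha256(secret_state + struct.pack('>Q', block_height)).digest()
    while not is_valid_key(seed):
        seed = hashlib.sha256(seed).digest()
    return seed

key = derive_key(b'\x00' * 32, 42, lambda candidate: candidate[:1] != b'\x00')
assert len(key) == 32
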
<|file_name|>dynamic_component_loader_spec.js<|end_file_name|><|fim▁begin|>System.register(["angular2/test_lib", "angular2/src/test_lib/test_bed", "angular2/src/core/annotations_impl/annotations", "angular2/src/core/annotations_impl/view", "angular2/src/core/compiler/dynamic_component_loader", "angular2/src/core/compiler/element_ref", "angular2/src/directives/if", "angular2/src/render/dom/direct_dom_renderer", "angular2/src/dom/dom_adapter"], function($__export) { "use strict"; var AsyncTestCompleter, beforeEach, ddescribe, xdescribe, describe, el, dispatchEvent, expect, iit, inject, beforeEachBindings, it, xit, TestBed, Component, View, DynamicComponentLoader, ElementRef, If, DirectDomRenderer, DOM, ImperativeViewComponentUsingNgComponent, ChildComp, DynamicallyCreatedComponentService, DynamicComp, DynamicallyCreatedCmp, DynamicallyLoaded, DynamicallyLoaded2, DynamicallyLoadedWithHostProps, Location, MyComp; function main() { describe('DynamicComponentLoader', function() { describe("loading into existing location", (function() { it('should work', inject([TestBed, AsyncTestCompleter], (function(tb, async) { tb.overrideView(MyComp, new View({ template: '<dynamic-comp #dynamic></dynamic-comp>', directives: [DynamicComp] })); tb.createView(MyComp).then((function(view) { var dynamicComponent = view.rawView.locals.get("dynamic");<|fim▁hole|> expect(view.rootNodes).toHaveText('hello'); async.done(); })); })); }))); it('should inject dependencies of the dynamically-loaded component', inject([TestBed, AsyncTestCompleter], (function(tb, async) { tb.overrideView(MyComp, new View({ template: '<dynamic-comp #dynamic></dynamic-comp>', directives: [DynamicComp] })); tb.createView(MyComp).then((function(view) { var dynamicComponent = view.rawView.locals.get("dynamic"); dynamicComponent.done.then((function(ref) { expect(ref.instance.dynamicallyCreatedComponentService).toBeAnInstanceOf(DynamicallyCreatedComponentService); async.done(); })); })); }))); it('should allow to destroy and create them via viewcontainer directives', inject([TestBed, AsyncTestCompleter], (function(tb, async) { tb.overrideView(MyComp, new View({ template: '<div><dynamic-comp #dynamic template="if: ctxBoolProp"></dynamic-comp></div>', directives: [DynamicComp, If] })); tb.createView(MyComp).then((function(view) { view.context.ctxBoolProp = true; view.detectChanges(); var dynamicComponent = view.rawView.viewContainers[0].views[0].locals.get("dynamic"); dynamicComponent.done.then((function(_) { view.detectChanges(); expect(view.rootNodes).toHaveText('hello'); view.context.ctxBoolProp = false; view.detectChanges(); expect(view.rawView.viewContainers[0].views.length).toBe(0); expect(view.rootNodes).toHaveText(''); view.context.ctxBoolProp = true; view.detectChanges(); var dynamicComponent = view.rawView.viewContainers[0].views[0].locals.get("dynamic"); return dynamicComponent.done; })).then((function(_) { view.detectChanges(); expect(view.rootNodes).toHaveText('hello'); async.done(); })); })); }))); })); describe("loading next to an existing location", (function() { it('should work', inject([DynamicComponentLoader, TestBed, AsyncTestCompleter], (function(loader, tb, async) { tb.overrideView(MyComp, new View({ template: '<div><location #loc></location></div>', directives: [Location] })); tb.createView(MyComp).then((function(view) { var location = view.rawView.locals.get("loc"); loader.loadNextToExistingLocation(DynamicallyLoaded, location.elementRef).then((function(ref) { expect(view.rootNodes).toHaveText("Location;DynamicallyLoaded;"); 
async.done(); })); })); }))); it('should return a disposable component ref', inject([DynamicComponentLoader, TestBed, AsyncTestCompleter], (function(loader, tb, async) { tb.overrideView(MyComp, new View({ template: '<div><location #loc></location></div>', directives: [Location] })); tb.createView(MyComp).then((function(view) { var location = view.rawView.locals.get("loc"); loader.loadNextToExistingLocation(DynamicallyLoaded, location.elementRef).then((function(ref) { loader.loadNextToExistingLocation(DynamicallyLoaded2, location.elementRef).then((function(ref2) { expect(view.rootNodes).toHaveText("Location;DynamicallyLoaded;DynamicallyLoaded2;"); ref2.dispose(); expect(view.rootNodes).toHaveText("Location;DynamicallyLoaded;"); async.done(); })); })); })); }))); it('should update host properties', inject([DynamicComponentLoader, TestBed, AsyncTestCompleter], (function(loader, tb, async) { tb.overrideView(MyComp, new View({ template: '<div><location #loc></location></div>', directives: [Location] })); tb.createView(MyComp).then((function(view) { var location = view.rawView.locals.get("loc"); loader.loadNextToExistingLocation(DynamicallyLoadedWithHostProps, location.elementRef).then((function(ref) { ref.instance.id = "new value"; view.detectChanges(); var newlyInsertedElement = DOM.childNodesAsList(view.rootNodes[0])[1]; expect(newlyInsertedElement.id).toEqual("new value"); async.done(); })); })); }))); })); describe('loading into a new location', (function() { it('should allow to create, update and destroy components', inject([TestBed, AsyncTestCompleter], (function(tb, async) { tb.overrideView(MyComp, new View({ template: '<imp-ng-cmp #impview></imp-ng-cmp>', directives: [ImperativeViewComponentUsingNgComponent] })); tb.createView(MyComp).then((function(view) { var userViewComponent = view.rawView.locals.get("impview"); userViewComponent.done.then((function(childComponentRef) { view.detectChanges(); expect(view.rootNodes).toHaveText('hello'); childComponentRef.instance.ctxProp = 'new'; view.detectChanges(); expect(view.rootNodes).toHaveText('new'); childComponentRef.dispose(); expect(view.rootNodes).toHaveText(''); async.done(); })); })); }))); })); }); } $__export("main", main); return { setters: [function($__m) { AsyncTestCompleter = $__m.AsyncTestCompleter; beforeEach = $__m.beforeEach; ddescribe = $__m.ddescribe; xdescribe = $__m.xdescribe; describe = $__m.describe; el = $__m.el; dispatchEvent = $__m.dispatchEvent; expect = $__m.expect; iit = $__m.iit; inject = $__m.inject; beforeEachBindings = $__m.beforeEachBindings; it = $__m.it; xit = $__m.xit; }, function($__m) { TestBed = $__m.TestBed; }, function($__m) { Component = $__m.Component; }, function($__m) { View = $__m.View; }, function($__m) { DynamicComponentLoader = $__m.DynamicComponentLoader; }, function($__m) { ElementRef = $__m.ElementRef; }, function($__m) { If = $__m.If; }, function($__m) { DirectDomRenderer = $__m.DirectDomRenderer; }, function($__m) { DOM = $__m.DOM; }], execute: function() { ImperativeViewComponentUsingNgComponent = (function() { var ImperativeViewComponentUsingNgComponent = function ImperativeViewComponentUsingNgComponent(self, dynamicComponentLoader, renderer) { var div = el('<div></div>'); renderer.setImperativeComponentRootNodes(self.parentView.render, self.boundElementIndex, [div]); this.done = dynamicComponentLoader.loadIntoNewLocation(ChildComp, self, div, null); }; return ($traceurRuntime.createClass)(ImperativeViewComponentUsingNgComponent, {}, {}); }()); 
Object.defineProperty(ImperativeViewComponentUsingNgComponent, "annotations", {get: function() { return [new Component({selector: 'imp-ng-cmp'}), new View({renderer: 'imp-ng-cmp-renderer'})]; }}); Object.defineProperty(ImperativeViewComponentUsingNgComponent, "parameters", {get: function() { return [[ElementRef], [DynamicComponentLoader], [DirectDomRenderer]]; }}); ChildComp = (function() { var ChildComp = function ChildComp() { this.ctxProp = 'hello'; }; return ($traceurRuntime.createClass)(ChildComp, {}, {}); }()); Object.defineProperty(ChildComp, "annotations", {get: function() { return [new Component({selector: 'child-cmp'}), new View({template: '{{ctxProp}}'})]; }}); DynamicallyCreatedComponentService = (function() { var DynamicallyCreatedComponentService = function DynamicallyCreatedComponentService() { ; }; return ($traceurRuntime.createClass)(DynamicallyCreatedComponentService, {}, {}); }()); DynamicComp = (function() { var DynamicComp = function DynamicComp(loader, location) { this.done = loader.loadIntoExistingLocation(DynamicallyCreatedCmp, location); }; return ($traceurRuntime.createClass)(DynamicComp, {}, {}); }()); Object.defineProperty(DynamicComp, "annotations", {get: function() { return [new Component({selector: 'dynamic-comp'})]; }}); Object.defineProperty(DynamicComp, "parameters", {get: function() { return [[DynamicComponentLoader], [ElementRef]]; }}); DynamicallyCreatedCmp = (function() { var DynamicallyCreatedCmp = function DynamicallyCreatedCmp(a) { this.greeting = "hello"; this.dynamicallyCreatedComponentService = a; }; return ($traceurRuntime.createClass)(DynamicallyCreatedCmp, {}, {}); }()); Object.defineProperty(DynamicallyCreatedCmp, "annotations", {get: function() { return [new Component({ selector: 'hello-cmp', injectables: [DynamicallyCreatedComponentService] }), new View({template: "{{greeting}}"})]; }}); Object.defineProperty(DynamicallyCreatedCmp, "parameters", {get: function() { return [[DynamicallyCreatedComponentService]]; }}); DynamicallyLoaded = (function() { var DynamicallyLoaded = function DynamicallyLoaded() { ; }; return ($traceurRuntime.createClass)(DynamicallyLoaded, {}, {}); }()); Object.defineProperty(DynamicallyLoaded, "annotations", {get: function() { return [new Component({selector: 'dummy'}), new View({template: "DynamicallyLoaded;"})]; }}); DynamicallyLoaded2 = (function() { var DynamicallyLoaded2 = function DynamicallyLoaded2() { ; }; return ($traceurRuntime.createClass)(DynamicallyLoaded2, {}, {}); }()); Object.defineProperty(DynamicallyLoaded2, "annotations", {get: function() { return [new Component({selector: 'dummy'}), new View({template: "DynamicallyLoaded2;"})]; }}); DynamicallyLoadedWithHostProps = (function() { var DynamicallyLoadedWithHostProps = function DynamicallyLoadedWithHostProps() { this.id = "default"; }; return ($traceurRuntime.createClass)(DynamicallyLoadedWithHostProps, {}, {}); }()); Object.defineProperty(DynamicallyLoadedWithHostProps, "annotations", {get: function() { return [new Component({ selector: 'dummy', hostProperties: {'id': 'id'} }), new View({template: "DynamicallyLoadedWithHostProps;"})]; }}); Location = (function() { var Location = function Location(elementRef) { this.elementRef = elementRef; }; return ($traceurRuntime.createClass)(Location, {}, {}); }()); Object.defineProperty(Location, "annotations", {get: function() { return [new Component({selector: 'location'}), new View({template: "Location;"})]; }}); Object.defineProperty(Location, "parameters", {get: function() { return [[ElementRef]]; }}); 
MyComp = (function() { var MyComp = function MyComp() { this.ctxBoolProp = false; }; return ($traceurRuntime.createClass)(MyComp, {}, {}); }()); Object.defineProperty(MyComp, "annotations", {get: function() { return [new Component({selector: 'my-comp'}), new View({directives: []})]; }}); } }; }); //# sourceMappingURL=dynamic_component_loader_spec.es6.map //# sourceMappingURL=./dynamic_component_loader_spec.js.map<|fim▁end|>
expect(dynamicComponent).toBeAnInstanceOf(DynamicComp); dynamicComponent.done.then((function(_) { view.detectChanges();
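
The spec above exercises one contract repeatedly: loading a component next to a location appends its rendered output, the returned reference exposes the component instance, and dispose() removes exactly the nodes that loading added (e.g. 'Location;DynamicallyLoaded;' collapsing back to 'Location;'). A toy Python model of that contract, not the Angular API:

# Toy model of the load/dispose contract asserted in the spec above.
class ComponentRef(object):
    def __init__(self, container, node):
        self._container = container
        self._node = node

    def dispose(self):
        # Remove exactly the output that loading inserted.
        self._container.remove(self._node)

def load_next_to(container, rendered_text):
    container.append(rendered_text)
    return ComponentRef(container, rendered_text)

nodes = ['Location;']
ref = load_next_to(nodes, 'DynamicallyLoaded;')
assert ''.join(nodes) == 'Location;DynamicallyLoaded;'
ref.dispose()
assert ''.join(nodes) == 'Location;'
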
////////////////////////////////////////////////////////////////////////////////
/// @brief rules for the query optimizer
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2014 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Max Neunhoeffer
/// @author Jan Steemann
/// @author Copyright 2014, triagens GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////

#include "OptimizerRules.h"
#include "Aql/AggregateNode.h"
#include "Aql/AggregationOptions.h"
#include "Aql/ClusterNodes.h"
#include "Aql/ConditionFinder.h"
#include "Aql/ExecutionEngine.h"
#include "Aql/ExecutionNode.h"
#include "Aql/Function.h"
#include "Aql/Index.h"
#include "Aql/IndexNode.h"
#include "Aql/ModificationNodes.h"
#include "Aql/SortCondition.h"
#include "Aql/SortNode.h"
#include "Aql/TraversalConditionFinder.h"
#include "Aql/Variable.h"
#include "Aql/types.h"
#include "Basics/json-utilities.h"

using namespace triagens::aql;
using Json = triagens::basics::Json;
using EN = triagens::aql::ExecutionNode;

// -----------------------------------------------------------------------------
// --SECTION--                                           rules for the optimizer
// -----------------------------------------------------------------------------

////////////////////////////////////////////////////////////////////////////////
/// @brief adds a SORT operation for IN right-hand side operands
////////////////////////////////////////////////////////////////////////////////

void triagens::aql::sortInValuesRule (Optimizer* opt,
                                      ExecutionPlan* plan,
                                      Optimizer::Rule const* rule) {
  bool modified = false;
  std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::FILTER, true));

  for (auto const& n : nodes) {
    // filter nodes always have one input variable
    auto varsUsedHere = n->getVariablesUsedHere();
    TRI_ASSERT(varsUsedHere.size() == 1);

    // now check who introduced our variable
    auto variable = varsUsedHere[0];
    auto setter = plan->getVarSetBy(variable->id);

    if (setter == nullptr || setter->getType() != EN::CALCULATION) {
      // filter variable was not introduced by a calculation.
      continue;
    }

    // filter variable was introduced by a CalculationNode. now check the expression
    auto s = static_cast<CalculationNode*>(setter);
    auto filterExpression = s->expression();
    auto inNode = filterExpression->nodeForModification();
    TRI_ASSERT(inNode != nullptr);

    // check the filter condition
    if ((inNode->type != NODE_TYPE_OPERATOR_BINARY_IN &&
         inNode->type != NODE_TYPE_OPERATOR_BINARY_NIN) ||
        inNode->canThrow() ||
        ! inNode->isDeterministic()) {
      // we better not tamper with this filter
      continue;
    }

    auto rhs = inNode->getMember(1);

    if (rhs->type != NODE_TYPE_REFERENCE) {
      continue;
    }

    auto loop = n->getLoop();

    if (loop == nullptr) {
      // FILTER is not used inside a loop, so it will be used at most once.
      // no need to sort the IN values then
      continue;
    }

    variable = static_cast<Variable const*>(rhs->getData());
    setter = plan->getVarSetBy(variable->id);

    if (setter == nullptr ||
        (setter->getType() != EN::CALCULATION &&
         setter->getType() != EN::SUBQUERY)) {
      // variable itself was not introduced by a calculation.
      continue;
    }

    if (loop == setter->getLoop()) {
      // the FILTER and its value calculation are contained in the same loop.
      // this means the FILTER will be executed as many times as its value
      // calculation. sorting the IN values will not provide a benefit here
      continue;
    }

    static size_t const Threshold = 8;

    auto ast = plan->getAst();
    AstNode const* originalArg = nullptr;

    if (setter->getType() == EN::CALCULATION) {
      AstNode const* originalNode = static_cast<CalculationNode*>(setter)->expression()->node();
      TRI_ASSERT(originalNode != nullptr);

      AstNode const* testNode = originalNode;

      if (originalNode->type == NODE_TYPE_FCALL &&
          static_cast<Function const*>(originalNode->getData())->externalName == "NOOPT") {
        // bypass NOOPT(...)
        TRI_ASSERT(originalNode->numMembers() == 1);
        auto args = originalNode->getMember(0);

        if (args->numMembers() > 0) {
          testNode = args->getMember(0);
        }
      }

      if (testNode->type == NODE_TYPE_VALUE ||
          testNode->type == NODE_TYPE_OBJECT) {
        // not really usable...
        continue;
      }

      if (testNode->type == NODE_TYPE_ARRAY &&
          testNode->numMembers() < Threshold) {
        // number of values is below threshold
        continue;
      }

      if (testNode->isSorted()) {
        // already sorted
        continue;
      }

      originalArg = originalNode;
    }
    else {
      TRI_ASSERT(setter->getType() == EN::SUBQUERY);
      auto sub = static_cast<SubqueryNode*>(setter);

      // estimate items in subquery
      size_t nrItems = 0;
      sub->getSubquery()->getCost(nrItems);

      if (nrItems < Threshold) {
        continue;
      }

      originalArg = ast->createNodeReference(sub->outVariable());
    }

    TRI_ASSERT(originalArg != nullptr);

    auto args = ast->createNodeArray();
    args->addMember(originalArg);
    auto sorted = ast->createNodeFunctionCall("SORTED_UNIQUE", args);

    auto outVar = ast->variables()->createTemporaryVariable();
    ExecutionNode* calculationNode = nullptr;
    auto expression = new Expression(ast, sorted);

    try {
      calculationNode = new CalculationNode(plan, plan->nextId(), expression, outVar);
    }
    catch (...) {
      delete expression;
      throw;
    }
    plan->registerNode(calculationNode);

    // make the new node a parent of the original calculation node
    calculationNode->addDependency(setter);
    auto const& oldParents = setter->getParents();
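    // A hedged sketch (hypothetical query) of the transformation being built
    // here:
    //   LET values = /* non-constant array or subquery */
    //   FOR doc IN collection FILTER doc.value IN values RETURN doc
    // conceptually becomes
    //   LET values = ...
    //   LET sortedValues = SORTED_UNIQUE(values)   /* the new CalculationNode */
    //   FOR doc IN collection FILTER doc.value IN sortedValues RETURN doc
    // so the IN lookup can binary-search the sorted, de-duplicated values
    // instead of scanning them linearly. (`sortedValues` stands in for the
    // unnamed temporary variable created above.)
    TRI_ASSERT(!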
oldParents.empty()); calculationNode->addParent(oldParents[0]); oldParents[0]->removeDependencies(); oldParents[0]->addDependency(calculationNode); setter->setParent(calculationNode); if (setter->getType() == EN::CALCULATION) { // mark the original node as being removable, even if it can throw // this is special as the optimizer will normally not remove any nodes // if they throw - even when fully unused otherwise static_cast<CalculationNode*>(setter)->canRemoveIfThrows(true); } // finally adjust the variable inside the IN calculation inNode->changeMember(1, ast->createNodeReference(outVar)); // set sortedness bit for the IN operator inNode->setBoolValue(true); modified = true; } opt->addPlan(plan, rule, modified); } //////////////////////////////////////////////////////////////////////////////// /// @brief remove redundant sorts /// this rule modifies the plan in place: /// - sorts that are covered by earlier sorts will be removed //////////////////////////////////////////////////////////////////////////////// void triagens::aql::removeRedundantSortsRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::SORT, true)); if (nodes.empty()) { // quick exit opt->addPlan(plan, rule, false); return; } std::unordered_set<ExecutionNode*> toUnlink; triagens::basics::StringBuffer buffer(TRI_UNKNOWN_MEM_ZONE); for (auto const& n : nodes) { if (toUnlink.find(n) != toUnlink.end()) { // encountered a sort node that we already deleted continue; } auto const sortNode = static_cast<SortNode*>(n); auto sortInfo = sortNode->getSortInformation(plan, &buffer); if (sortInfo.isValid && ! sortInfo.criteria.empty()) { // we found a sort that we can understand std::vector<ExecutionNode*> stack; sortNode->addDependencies(stack); int nodesRelyingOnSort = 0; while (! stack.empty()) { auto current = stack.back(); stack.pop_back(); if (current->getType() == EN::SORT) { // we found another sort. now check if they are compatible! auto other = static_cast<SortNode*>(current)->getSortInformation(plan, &buffer); switch (sortInfo.isCoveredBy(other)) { case SortInformation::unequal: { // different sort criteria if (nodesRelyingOnSort == 0) { // a sort directly followed by another sort: now remove one of them if (other.canThrow || ! 
other.isDeterministic) { // if the sort can throw or is non-deterministic, we must not remove it break; } if (sortNode->isStable()) { // we should not optimize predecessors of a stable sort (used in a COLLECT node) // the stable sort is for a reason, and removing any predecessors sorts might // change the result break; } // remove sort that is a direct predecessor of a sort toUnlink.emplace(current); } break; } case SortInformation::otherLessAccurate: { toUnlink.emplace(current); break; } case SortInformation::ourselvesLessAccurate: { // the sort at the start of the pipeline makes the sort at the end // superfluous, so we'll remove it toUnlink.emplace(n); break; } case SortInformation::allEqual: { // the sort at the end of the pipeline makes the sort at the start // superfluous, so we'll remove it toUnlink.emplace(current); break; } } } else if (current->getType() == EN::FILTER) { // ok: a filter does not depend on sort order } else if (current->getType() == EN::CALCULATION) { // ok: a filter does not depend on sort order only if it does not throw if (current->canThrow()) { ++nodesRelyingOnSort; } } else if (current->getType() == EN::ENUMERATE_LIST || current->getType() == EN::ENUMERATE_COLLECTION || current->getType() == EN::TRAVERSAL) { // ok, but we cannot remove two different sorts if one of these node types is between them // example: in the following query, the one sort will be optimized away: // FOR i IN [ { a: 1 }, { a: 2 } , { a: 3 } ] SORT i.a ASC SORT i.a DESC RETURN i // but in the following query, the sorts will stay: // FOR i IN [ { a: 1 }, { a: 2 } , { a: 3 } ] SORT i.a ASC LET a = i.a SORT i.a DESC RETURN i ++nodesRelyingOnSort; } else { // abort at all other type of nodes. we cannot remove a sort beyond them // this includes COLLECT and LIMIT break; } if (! current->hasDependency()) { // node either has no or more than one dependency. we don't know what to do and must abort // note: this will also handle Singleton nodes break; } current->addDependencies(stack); } if (toUnlink.find(n) == toUnlink.end() && sortNode->simplify(plan)) { // sort node had only constant expressions. it will make no difference if we execute it or not // so we can remove it toUnlink.emplace(n); } } } if (! toUnlink.empty()) { plan->unlinkNodes(toUnlink); } opt->addPlan(plan, rule, ! toUnlink.empty()); } //////////////////////////////////////////////////////////////////////////////// /// @brief remove all unnecessary filters /// this rule modifies the plan in place: /// - filters that are always true are removed completely /// - filters that are always false will be replaced by a NoResults node //////////////////////////////////////////////////////////////////////////////// void triagens::aql::removeUnnecessaryFiltersRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { bool modified = false; std::unordered_set<ExecutionNode*> toUnlink; // should we enter subqueries?? std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::FILTER, true)); for (auto const& n : nodes) { // filter nodes always have one input variable auto varsUsedHere = n->getVariablesUsedHere(); TRI_ASSERT(varsUsedHere.size() == 1); // now check who introduced our variable auto variable = varsUsedHere[0]; auto setter = plan->getVarSetBy(variable->id); if (setter == nullptr || setter->getType() != EN::CALCULATION) { // filter variable was not introduced by a calculation. continue; } // filter variable was introduced a CalculationNode. 
now check the expression auto s = static_cast<CalculationNode*>(setter); auto root = s->expression()->node(); TRI_ASSERT(root != nullptr); if (root->canThrow() || ! root->isDeterministic()) { // we better not tamper with this filter continue; } // filter expression is constant and thus cannot throw // we can now evaluate it safely TRI_ASSERT(! s->expression()->canThrow()); if (root->isTrue()) { // filter is always true // remove filter node and merge with following node toUnlink.emplace(n); modified = true; } else if (root->isFalse()) { // filter is always false // now insert a NoResults node below it auto noResults = new NoResultsNode(plan, plan->nextId()); plan->registerNode(noResults); plan->replaceNode(n, noResults); modified = true; } } if (! toUnlink.empty()) { plan->unlinkNodes(toUnlink); } opt->addPlan(plan, rule, modified); } #if 0 struct CollectVariableFinder { Variable const* searchVariable; std::unordered_set<std::string>& attributeNames; std::vector<AstNode const*> stack; bool canUseOptimization; bool isArgumentToLength; CollectVariableFinder (AggregateNode const* collectNode, std::unordered_set<std::string>& attributeNames) : searchVariable(collectNode->outVariable()), attributeNames(attributeNames), stack(), canUseOptimization(true), isArgumentToLength(false) { TRI_ASSERT(searchVariable != nullptr); stack.reserve(4); } void analyze (AstNode const* node) { TRI_ASSERT(node != nullptr); if (! canUseOptimization) { // we already know we cannot apply this optimization return; } stack.push_back(node); size_t const n = node->numMembers(); for (size_t i = 0; i < n; ++i) { auto sub = node->getMember(i); if (sub != nullptr) { // recurse into subnodes analyze(sub); } } if (node->type == NODE_TYPE_REFERENCE) { auto variable = static_cast<Variable const*>(node->getData()); TRI_ASSERT(variable != nullptr); if (variable->id == searchVariable->id) { bool handled = false; auto const size = stack.size(); if (size >= 3 && stack[size - 3]->type == NODE_TYPE_EXPANSION) { // our variable is used in an expansion, e.g. g[*].attribute auto expandNode = stack[size - 3]; TRI_ASSERT(expandNode->numMembers() == 2); TRI_ASSERT(expandNode->getMember(0)->type == NODE_TYPE_ITERATOR); auto expansion = expandNode->getMember(1); TRI_ASSERT(expansion != nullptr); while (expansion->type == NODE_TYPE_ATTRIBUTE_ACCESS) { // note which attribute is used with our variable if (expansion->getMember(0)->type == NODE_TYPE_ATTRIBUTE_ACCESS) { expansion = expansion->getMember(0); } else { attributeNames.emplace(expansion->getStringValue()); handled = true; break; } } } else if (size >= 3 && stack[size - 2]->type == NODE_TYPE_ARRAY && stack[size - 3]->type == NODE_TYPE_FCALL) { auto func = static_cast<Function const*>(stack[size - 3]->getData()); if (func->externalName == "LENGTH" && stack[size - 2]->numMembers() == 1) { // call to function LENGTH() with our variable as its single argument handled = true; isArgumentToLength = true; } } if (! 
handled) { canUseOptimization = false; } } } stack.pop_back(); } }; #endif //////////////////////////////////////////////////////////////////////////////// /// @brief specialize the variables used in a COLLECT INTO //////////////////////////////////////////////////////////////////////////////// #if 0 void triagens::aql::specializeCollectVariables (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { bool modified = false; std::vector<ExecutionNode*> nodes = plan->findNodesOfType(EN::AGGREGATE, true); for (auto n : nodes) { auto collectNode = static_cast<AggregateNode*>(n); TRI_ASSERT(collectNode != nullptr); auto deps = collectNode->getDependencies(); if (deps.size() != 1) { continue; } if (! collectNode->hasOutVariable() || collectNode->hasExpressionVariable() || collectNode->count()) { // COLLECT without INTO or a COLLECT that already uses an // expression variable or a COLLECT that only counts continue; } auto outVariable = collectNode->outVariable(); // must have an outVariable if we got here TRI_ASSERT(outVariable != nullptr); std::unordered_set<std::string> attributeNames; CollectVariableFinder finder(collectNode, attributeNames); // check all following nodes for usage of the out variable std::vector<ExecutionNode*> parents(n->getParents()); while (! parents.empty() && finder.canUseOptimization) { auto current = parents.back(); parents.pop_back(); for (auto it : current->getParents()) { parents.emplace_back(it); } // now check current node for usage of out variable auto const&& variablesUsed = current->getVariablesUsedHere(); bool found = false; for (auto it : variablesUsed) { if (it == outVariable) { found = true; break; } } if (found) { // variable is used. now find out how it is used if (current->getType() != EN::CALCULATION) { // variable is used outside of a calculation... skip optimization // TODO break; } auto calculationNode = static_cast<CalculationNode*>(current); auto expression = calculationNode->expression(); TRI_ASSERT(expression != nullptr); finder.analyze(expression->node()); } } if (finder.canUseOptimization) { // can use the optimization if (! finder.attributeNames.empty()) { auto obj = plan->getAst()->createNodeObject(); for (auto const& attributeName : finder.attributeNames) { for (auto it : collectNode->getVariablesUsedHere()) { if (it->name == attributeName) { auto refNode = plan->getAst()->createNodeReference(it); auto element = plan->getAst()->createNodeObjectElement(it->name.c_str(), refNode); obj->addMember(element); } } } if (obj->numMembers() == attributeNames.size()) { collectNode->removeDependency(deps[0]); auto calculationNode = plan->createTemporaryCalculation(obj); calculationNode->addDependency(deps[0]); collectNode->addDependency(calculationNode); collectNode->setExpressionVariable(calculationNode->outVariable()); modified = true; } } } } opt->addPlan(plan, rule, modified); } #endif //////////////////////////////////////////////////////////////////////////////// /// @brief remove INTO of a COLLECT if not used //////////////////////////////////////////////////////////////////////////////// void triagens::aql::removeCollectIntoRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { bool modified = false; std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::AGGREGATE, true)); for (auto const& n : nodes) { auto collectNode = static_cast<AggregateNode*>(n); TRI_ASSERT(collectNode != nullptr); auto outVariable = collectNode->outVariable(); if (outVariable == nullptr) { // no out variable. 
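// nothing to do
continue;
}

// A hedged AQL illustration (hypothetical query) of what this rule removes:
//   FOR doc IN collection COLLECT g = doc.group INTO all RETURN g
// here `all` is never referenced after the COLLECT, so the INTO part can be
// dropped and the group members need not be materialized.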
    auto varsUsedLater = n->getVarsUsedLater();

    if (varsUsedLater.find(outVariable) != varsUsedLater.end()) {
      // outVariable is used later
      continue;
    }

    // outVariable is not used later. remove it!
    collectNode->clearOutVariable();
    modified = true;
  }

  opt->addPlan(plan, rule, modified);
}

// -----------------------------------------------------------------------------
// --SECTION--                 helper class for propagateConstantAttributesRule
// -----------------------------------------------------------------------------

class PropagateConstantAttributesHelper {

  public:

    PropagateConstantAttributesHelper ()
      : _constants(),
        _modified(false) {
    }

    bool modified () const {
      return _modified;
    }

////////////////////////////////////////////////////////////////////////////////
/// @brief inspects a plan and propagates constant values in expressions
////////////////////////////////////////////////////////////////////////////////

    void propagateConstants (ExecutionPlan* plan) {
      std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::FILTER, true));

      for (auto const& node : nodes) {
        auto fn = static_cast<FilterNode*>(node);

        auto inVar = fn->getVariablesUsedHere();
        TRI_ASSERT(inVar.size() == 1);

        auto setter = plan->getVarSetBy(inVar[0]->id);

        if (setter != nullptr &&
            setter->getType() == EN::CALCULATION) {
          auto cn = static_cast<CalculationNode*>(setter);
          // collect constant attribute values from the filter condition
          // (restored from context; mirrors the insertion pass below)
          auto expression = cn->expression();

          if (expression != nullptr) {
            collectConstantAttributes(const_cast<AstNode*>(expression->node()));
          }
        }
      }

      if (! _constants.empty()) {
        for (auto const& node : nodes) {
          auto fn = static_cast<FilterNode*>(node);

          auto inVar = fn->getVariablesUsedHere();
          TRI_ASSERT(inVar.size() == 1);

          auto setter = plan->getVarSetBy(inVar[0]->id);

          if (setter != nullptr &&
              setter->getType() == EN::CALCULATION) {
            auto cn = static_cast<CalculationNode*>(setter);
            auto expression = cn->expression();

            if (expression != nullptr) {
              insertConstantAttributes(const_cast<AstNode*>(expression->node()));
            }
          }
        }
      }
    }

  private:

    AstNode const* getConstant (Variable const* variable,
                                std::string const& attribute) const {
      auto it = _constants.find(variable);

      if (it == _constants.end()) {
        return nullptr;
      }

      auto it2 = (*it).second.find(attribute);

      if (it2 == (*it).second.end()) {
        return nullptr;
      }

      return (*it2).second;
    }

////////////////////////////////////////////////////////////////////////////////
/// @brief inspects an expression (recursively) and notes constant attribute
/// values so they can be propagated later
////////////////////////////////////////////////////////////////////////////////

    void collectConstantAttributes (AstNode* node) {
      if (node == nullptr) {
        return;
      }

      if (node->type == NODE_TYPE_OPERATOR_BINARY_AND) {
        auto lhs = node->getMember(0);
        auto rhs = node->getMember(1);

        collectConstantAttributes(lhs);
        collectConstantAttributes(rhs);
      }
      else if (node->type == NODE_TYPE_OPERATOR_BINARY_EQ) {
        auto lhs = node->getMember(0);
        auto rhs = node->getMember(1);

        if (lhs->isConstant() && rhs->type == NODE_TYPE_ATTRIBUTE_ACCESS) {
          inspectConstantAttribute(rhs, lhs);
        }
        else if (rhs->isConstant() && lhs->type == NODE_TYPE_ATTRIBUTE_ACCESS) {
          inspectConstantAttribute(lhs, rhs);
        }
      }
    }

////////////////////////////////////////////////////////////////////////////////
/// @brief traverses an AST part recursively and patches it by inserting
/// constant values
////////////////////////////////////////////////////////////////////////////////

    void insertConstantAttributes (AstNode* node) {
      if (node == nullptr) {
        return;
      }

      if (node->type == NODE_TYPE_OPERATOR_BINARY_AND) {
        auto lhs = node->getMember(0);
        auto rhs = node->getMember(1);

        insertConstantAttributes(lhs);
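        // A hedged illustration (hypothetical query) of the intended effect:
        //   FILTER doc.a == 1 && other.b == doc.a
        // once the constant for `doc.a` has been collected, the second
        // comparison can be patched to `other.b == 1`, which may enable
        // further optimizations downstream.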
insertConstantAttributes(rhs); } else if (node->type == NODE_TYPE_OPERATOR_BINARY_EQ) { auto lhs = node->getMember(0); auto rhs = node->getMember(1); if (! lhs->isConstant() && rhs->type == NODE_TYPE_ATTRIBUTE_ACCESS) { insertConstantAttribute(node, 1); } if (! rhs->isConstant() && lhs->type == NODE_TYPE_ATTRIBUTE_ACCESS) { insertConstantAttribute(node, 0); } } } //////////////////////////////////////////////////////////////////////////////// /// @brief extract an attribute and its variable from an attribute access /// (e.g. `a.b.c` will return variable `a` and attribute name `b.c.`. //////////////////////////////////////////////////////////////////////////////// bool getAttribute (AstNode const* attribute, Variable const*& variable, std::string& name) { TRI_ASSERT(attribute != nullptr && attribute->type == NODE_TYPE_ATTRIBUTE_ACCESS); TRI_ASSERT(name.empty()); while (attribute->type == NODE_TYPE_ATTRIBUTE_ACCESS) { name = std::string(".") + std::string(attribute->getStringValue(), attribute->getStringLength()) + name; attribute = attribute->getMember(0); } if (attribute->type != NODE_TYPE_REFERENCE) { return false; } variable = static_cast<Variable const*>(attribute->getData()); TRI_ASSERT(variable != nullptr); return true; } //////////////////////////////////////////////////////////////////////////////// /// @brief inspect the constant value assigned to an attribute /// the attribute value will be stored so it can be inserted for the attribute /// later //////////////////////////////////////////////////////////////////////////////// void inspectConstantAttribute (AstNode const* attribute, AstNode const* value) { Variable const* variable = nullptr; std::string name; if (! getAttribute(attribute, variable, name)) { return; } auto it = _constants.find(variable); if (it == _constants.end()) { _constants.emplace(variable, std::unordered_map<std::string, AstNode const*>{ { name, value } }); return; } auto it2 = (*it).second.find(name); if (it2 == (*it).second.end()) { // first value for the attribute (*it).second.emplace(name, value); } else { auto previous = (*it2).second; if (previous == nullptr) { // we have multiple different values for the attribute. better not use this attribute return; } if (TRI_CompareValuesJson(value->computeJson(), previous->computeJson(), true) != 0) { // different value found for an already tracked attribute. better not use this attribute (*it2).second = nullptr; } } } //////////////////////////////////////////////////////////////////////////////// /// @brief patches an AstNode by inserting a constant value into it //////////////////////////////////////////////////////////////////////////////// void insertConstantAttribute (AstNode* parentNode, size_t accessIndex) { Variable const* variable = nullptr; std::string name; if (! 
getAttribute(parentNode->getMember(accessIndex), variable, name)) { return; } auto constantValue = getConstant(variable, name); if (constantValue != nullptr) { parentNode->changeMember(accessIndex, const_cast<AstNode*>(constantValue)); _modified = true; } } std::unordered_map<Variable const*, std::unordered_map<std::string, AstNode const*>> _constants; bool _modified; }; //////////////////////////////////////////////////////////////////////////////// /// @brief propagate constant attributes in FILTERs //////////////////////////////////////////////////////////////////////////////// void triagens::aql::propagateConstantAttributesRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { PropagateConstantAttributesHelper helper; helper.propagateConstants(plan); opt->addPlan(plan, rule, helper.modified()); } //////////////////////////////////////////////////////////////////////////////// /// @brief remove SORT RAND() if appropriate //////////////////////////////////////////////////////////////////////////////// void triagens::aql::removeSortRandRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { bool modified = false; // should we enter subqueries?? std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::SORT, true)); for (auto const& n : nodes) { auto node = static_cast<SortNode*>(n); auto const& elements = node->getElements(); if (elements.size() != 1) { // we're looking for "SORT RAND()", which has just one sort criterion continue; } auto const variable = elements[0].first; TRI_ASSERT(variable != nullptr); auto setter = plan->getVarSetBy(variable->id); if (setter == nullptr || setter->getType() != EN::CALCULATION) { continue; } auto cn = static_cast<CalculationNode*>(setter); auto const expression = cn->expression(); if (expression == nullptr || expression->node() == nullptr || expression->node()->type != NODE_TYPE_FCALL) { // not the right type of node continue; } auto funcNode = expression->node(); auto func = static_cast<Function const*>(funcNode->getData()); // we're looking for "RAND()", which is a function call // with an empty parameters array if (func->externalName != "RAND" || funcNode->numMembers() != 1 || funcNode->getMember(0)->numMembers() != 0) { continue; } // now we're sure we got SORT RAND() ! // we found what we were looking for! // now check if the dependencies qualify if (! n->hasDependency()) { break; } auto current = n->getFirstDependency(); ExecutionNode* collectionNode = nullptr; while (current != nullptr) { if (current->canThrow()) { // we shouldn't bypass a node that can throw collectionNode = nullptr; break; } switch (current->getType()) { case EN::SORT: case EN::AGGREGATE: case EN::FILTER: case EN::SUBQUERY: case EN::ENUMERATE_LIST: case EN::TRAVERSAL: case EN::INDEX: { // if we found another SortNode, an AggregateNode, FilterNode, a SubqueryNode, // an EnumerateListNode, a TraversalNode or an IndexNode // this means we cannot apply our optimization collectionNode = nullptr; current = nullptr; continue; // this will exit the while loop } case EN::ENUMERATE_COLLECTION: { if (collectionNode == nullptr) { // note this node collectionNode = current; break; } else { // we already found another collection node before. this means we // should not apply our optimization collectionNode = nullptr; current = nullptr; continue; // this will exit the while loop } // cannot get here TRI_ASSERT(false); } default: { // ignore all other nodes } } if (! 
current->hasDependency()) { break; } current = current->getFirstDependency(); } if (collectionNode != nullptr) { // we found a node to modify! TRI_ASSERT(collectionNode->getType() == EN::ENUMERATE_COLLECTION); // set the random iteration flag for the EnumerateCollectionNode static_cast<EnumerateCollectionNode*>(collectionNode)->setRandom(); // remove the SortNode // note: the CalculationNode will be removed by "remove-unnecessary-calculations" // rule if not used plan->unlinkNode(n); modified = true; } } opt->addPlan(plan, rule, modified); } //////////////////////////////////////////////////////////////////////////////// /// @brief move calculations up in the plan /// this rule modifies the plan in place /// it aims to move up calculations as far up in the plan as possible, to /// avoid redundant calculations in inner loops //////////////////////////////////////////////////////////////////////////////// void triagens::aql::moveCalculationsUpRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::CALCULATION, true)); bool modified = false; for (auto const& n : nodes) { auto nn = static_cast<CalculationNode*>(n); if (nn->expression()->canThrow() || ! nn->expression()->isDeterministic()) { // we will only move expressions up that cannot throw and that are deterministic continue; } std::unordered_set<Variable const*> neededVars; n->getVariablesUsedHere(neededVars); std::vector<ExecutionNode*> stack; n->addDependencies(stack); while (! stack.empty()) { auto current = stack.back(); stack.pop_back(); bool found = false; for (auto const& v : current->getVariablesSetHere()) { if (neededVars.find(v) != neededVars.end()) { // shared variable, cannot move up any more found = true; break; } } if (found) { // done with optimizing this calculation node break; } if (! current->hasDependency()) { // node either has no or more than one dependency. we don't know what to do and must abort // note: this will also handle Singleton nodes break; } current->addDependencies(stack); // first, unlink the calculation from the plan plan->unlinkNode(n); // and re-insert into before the current node plan->insertDependency(current, n); modified = true; } } opt->addPlan(plan, rule, modified); } //////////////////////////////////////////////////////////////////////////////// /// @brief move calculations down in the plan /// this rule modifies the plan in place /// it aims to move calculations as far down in the plan as possible, beyond /// FILTER and LIMIT operations //////////////////////////////////////////////////////////////////////////////// void triagens::aql::moveCalculationsDownRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::CALCULATION, true)); bool modified = false; for (auto const& n : nodes) { auto nn = static_cast<CalculationNode*>(n); if (nn->expression()->canThrow() || ! nn->expression()->isDeterministic()) { // we will only move expressions down that cannot throw and that are deterministic continue; } // this is the variable that the calculation will set auto variable = nn->outVariable(); std::vector<ExecutionNode*> stack; n->addParents(stack); bool shouldMove = false; ExecutionNode* lastNode = nullptr; while (! 
stack.empty()) { auto current = stack.back(); stack.pop_back(); lastNode = current; bool done = false; for (auto const& v : current->getVariablesUsedHere()) { if (v == variable) { // the node we're looking at needs the variable we're setting. // can't push further! done = true; break; } } if (done) { // done with optimizing this calculation node break; } auto const currentType = current->getType(); if (currentType == EN::FILTER || currentType == EN::SORT || currentType == EN::LIMIT || currentType == EN::SUBQUERY) { // we found something interesting that justifies moving our node down shouldMove = true; } else if (currentType == EN::INDEX || currentType == EN::ENUMERATE_COLLECTION || currentType == EN::ENUMERATE_LIST || currentType == EN::TRAVERSAL || currentType == EN::AGGREGATE || currentType == EN::NORESULTS) { // we will not push further down than such nodes shouldMove = false; break; } if (! current->hasParent()) { break; } current->addParents(stack); } if (shouldMove && lastNode != nullptr) { // first, unlink the calculation from the plan plan->unlinkNode(n); // and re-insert into before the current node plan->insertDependency(lastNode, n); modified = true; } } opt->addPlan(plan, rule, modified); } //////////////////////////////////////////////////////////////////////////////// /// @brief fuse calculations in the plan /// this rule modifies the plan in place //////////////////////////////////////////////////////////////////////////////// void triagens::aql::fuseCalculationsRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::CALCULATION, true)); if (nodes.size() < 2) { opt->addPlan(plan, rule, false); return; } std::unordered_set<ExecutionNode*> toUnlink; for (auto const& n : nodes) { auto nn = static_cast<CalculationNode*>(n); if (nn->expression()->canThrow() || ! nn->expression()->isDeterministic()) { // we will only fuse calculations of expressions that cannot throw and that are deterministic continue; } if (toUnlink.find(n) != toUnlink.end()) { // do not process the same node twice continue; } std::unordered_map<Variable const*, ExecutionNode*> toInsert; for (auto& it : nn->getVariablesUsedHere()) { if (! n->isVarUsedLater(it)) { toInsert.emplace(it, n); } } TRI_ASSERT(n->hasDependency()); std::vector<ExecutionNode*> stack{ n->getFirstDependency() }; while (! stack.empty()) { auto current = stack.back(); stack.pop_back(); bool handled = false; if (current->getType() == EN::CALCULATION) { auto otherExpression = static_cast<CalculationNode const*>(current)->expression(); if (otherExpression->isDeterministic() && ! otherExpression->canThrow() && otherExpression->canRunOnDBServer() == nn->expression()->canRunOnDBServer()) { // found another calculation node auto varsSet(std::move(current->getVariablesSetHere())); if (varsSet.size() == 1) { // check if it is a calculation for a variable that we are looking for auto it = toInsert.find(varsSet[0]); if (it != toInsert.end()) { // remove the variable from the list of search variables toInsert.erase(it); // replace the variable reference in the original expression with the expression for that variable auto expression = nn->expression(); TRI_ASSERT(expression != nullptr); expression->replaceVariableReference((*it).first, otherExpression->node()); toUnlink.emplace(current); // insert the calculations' own referenced variables into the list of search variables for (auto& it2 : current->getVariablesUsedHere()) { if (! 
n->isVarUsedLater(it2)) { toInsert.emplace(it2, n); } } handled = true; } } } } if (! handled) { // remove all variables from our list that might be used elsewhere for (auto& it : current->getVariablesUsedHere()) { toInsert.erase(it); } } if (toInsert.empty()) { // done break; } if (! current->hasDependency()) { break; } stack.emplace_back(current->getFirstDependency()); } } if (! toUnlink.empty()) { plan->unlinkNodes(toUnlink); } opt->addPlan(plan, rule, ! toUnlink.empty()); } //////////////////////////////////////////////////////////////////////////////// /// @brief determine the "right" type of AggregateNode and /// add a sort node for each COLLECT (note: the sort may be removed later) /// this rule cannot be turned off (otherwise, the query result might be wrong!) //////////////////////////////////////////////////////////////////////////////// void triagens::aql::specializeCollectRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::AGGREGATE, true)); bool modified = false; for (auto const& n : nodes) { auto collectNode = static_cast<AggregateNode*>(n); if (collectNode->isSpecialized()) { // already specialized this node continue; } auto const& aggregateVariables = collectNode->aggregateVariables(); // test if we can use an alternative version of COLLECT with a hash table bool const canUseHashAggregation = (! aggregateVariables.empty() && (! collectNode->hasOutVariable() || collectNode->count()) && collectNode->getOptions().canUseHashMethod()); if (canUseHashAggregation) { // create a new plan with the adjusted COLLECT node std::unique_ptr<ExecutionPlan> newPlan(plan->clone()); // use the cloned COLLECT node auto newCollectNode = static_cast<AggregateNode*>(newPlan->getNodeById(collectNode->id())); TRI_ASSERT(newCollectNode != nullptr); // specialize the AggregateNode so it will become a HashAggregateBlock later // additionally, add a SortNode BEHIND the AggregateNode (to sort the final result) newCollectNode->aggregationMethod(AggregationOptions::AggregationMethod::AGGREGATION_METHOD_HASH); newCollectNode->specialized(); if (! collectNode->isDistinctCommand()) { // add the post-SORT std::vector<std::pair<Variable const*, bool>> sortElements; for (auto const& v : newCollectNode->aggregateVariables()) { sortElements.emplace_back(std::make_pair(v.first, true)); } auto sortNode = new SortNode(newPlan.get(), newPlan->nextId(), sortElements, false); newPlan->registerNode(sortNode); TRI_ASSERT(newCollectNode->hasParent()); auto const& parents = newCollectNode->getParents(); auto parent = parents[0]; sortNode->addDependency(newCollectNode); parent->replaceDependency(newCollectNode, sortNode); } newPlan->findVarUsage(); if (nodes.size() > 1) { // this will tell the optimizer to optimize the cloned plan with this specific rule again opt->addPlan(newPlan.release(), rule, true, static_cast<int>(rule->level - 1)); } else { // no need to run this specific rule again on the cloned plan opt->addPlan(newPlan.release(), rule, true); } } // mark node as specialized, so we do not process it again collectNode->specialized(); // finally, adjust the original plan and create a sorted version of COLLECT // specialize the AggregateNode so it will become a SortedAggregateBlock later collectNode->aggregationMethod(AggregationOptions::AggregationMethod::AGGREGATION_METHOD_SORTED); // insert a SortNode IN FRONT OF the AggregateNode if (! 
aggregateVariables.empty()) { std::vector<std::pair<Variable const*, bool>> sortElements; for (auto const& v : aggregateVariables) { sortElements.emplace_back(std::make_pair(v.second, true)); } auto sortNode = new SortNode(plan, plan->nextId(), sortElements, true); plan->registerNode(sortNode); TRI_ASSERT(collectNode->hasDependency()); auto dep = collectNode->getFirstDependency(); sortNode->addDependency(dep); collectNode->replaceDependency(dep, sortNode); modified = true; } } opt->addPlan(plan, rule, modified); } //////////////////////////////////////////////////////////////////////////////// /// @brief split and-combined filters and break them into smaller parts //////////////////////////////////////////////////////////////////////////////// void triagens::aql::splitFiltersRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::FILTER, true)); bool modified = false; for (auto const& n : nodes) { auto inVars(std::move(n->getVariablesUsedHere())); TRI_ASSERT(inVars.size() == 1); auto setter = plan->getVarSetBy(inVars[0]->id); if (setter == nullptr || setter->getType() != EN::CALCULATION) { continue; } auto cn = static_cast<CalculationNode*>(setter); auto const expression = cn->expression(); if (expression->canThrow() || ! expression->isDeterministic() || expression->node()->type != NODE_TYPE_OPERATOR_BINARY_AND) { continue; } std::vector<AstNode*> stack{ expression->nodeForModification() }; while (! stack.empty()) { auto current = stack.back(); stack.pop_back(); if (current->type == NODE_TYPE_OPERATOR_BINARY_AND) { stack.emplace_back(current->getMember(0)); stack.emplace_back(current->getMember(1)); } else { modified = true; ExecutionNode* calculationNode = nullptr; auto outVar = plan->getAst()->variables()->createTemporaryVariable(); auto expression = new Expression(plan->getAst(), current); try { calculationNode = new CalculationNode(plan, plan->nextId(), expression, outVar); } catch (...) { delete expression; throw; } plan->registerNode(calculationNode); plan->insertDependency(n, calculationNode); auto filterNode = new FilterNode(plan, plan->nextId(), outVar); plan->registerNode(filterNode); plan->insertDependency(n, filterNode); } } if (modified) { plan->unlinkNode(n, false); } } opt->addPlan(plan, rule, modified); } //////////////////////////////////////////////////////////////////////////////// /// @brief move filters up in the plan /// this rule modifies the plan in place /// filters are moved as far up in the plan as possible to make result sets /// as small as possible as early as possible /// filters are not pushed beyond limits //////////////////////////////////////////////////////////////////////////////// void triagens::aql::moveFiltersUpRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::FILTER, true)); bool modified = false; for (auto const& n : nodes) { auto neededVars = n->getVariablesUsedHere(); TRI_ASSERT(neededVars.size() == 1); std::vector<ExecutionNode*> stack; n->addDependencies(stack); while (! 
stack.empty()) { auto current = stack.back(); stack.pop_back(); if (current->getType() == EN::LIMIT) { // cannot push a filter beyond a LIMIT node break; } if (current->canThrow()) { // must not move a filter beyond a node that can throw break; } if (current->getType() == EN::CALCULATION) { // must not move a filter beyond a node with a non-deterministic result auto calculation = static_cast<CalculationNode const*>(current); if (! calculation->expression()->isDeterministic()) { break; } } bool found = false; for (auto const& v : current->getVariablesSetHere()) { for (auto it = neededVars.begin(); it != neededVars.end(); ++it) { if ((*it)->id == v->id) { // shared variable, cannot move up any more found = true; break; } } } if (found) { // done with optimizing this calculation node break; } if (! current->hasDependency()) { // node either has no or more than one dependency. we don't know what to do and must abort // note: this will also handle Singleton nodes break; } current->addDependencies(stack); // first, unlink the filter from the plan plan->unlinkNode(n); // and re-insert into plan in front of the current node plan->insertDependency(current, n); modified = true; } } opt->addPlan(plan, rule, modified); } class triagens::aql::RedundantCalculationsReplacer final : public WalkerWorker<ExecutionNode> { public: explicit RedundantCalculationsReplacer (std::unordered_map<VariableId, Variable const*> const& replacements) : _replacements(replacements) { } template<typename T> void replaceInVariable (ExecutionNode* en) { auto node = static_cast<T*>(en); node->_inVariable = Variable::replace(node->_inVariable, _replacements); } void replaceInCalculation (ExecutionNode* en) { auto node = static_cast<CalculationNode*>(en); std::unordered_set<Variable const*> variables; node->expression()->variables(variables); // check if the calculation uses any of the variables that we want to replace for (auto const& it : variables) { if (_replacements.find(it->id) != _replacements.end()) { // calculation uses a to-be-replaced variable node->expression()->replaceVariables(_replacements); return; } } } bool before (ExecutionNode* en) override final { switch (en->getType()) { case EN::ENUMERATE_LIST: { replaceInVariable<EnumerateListNode>(en); break; } case EN::RETURN: { replaceInVariable<ReturnNode>(en); break; } case EN::CALCULATION: { replaceInCalculation(en); break; } case EN::FILTER: { replaceInVariable<FilterNode>(en); break; } case EN::AGGREGATE: { auto node = static_cast<AggregateNode*>(en); for (auto& variable : node->_aggregateVariables) { variable.second = Variable::replace(variable.second, _replacements); } break; } case EN::SORT: { auto node = static_cast<SortNode*>(en); for (auto& variable : node->_elements) { variable.first = Variable::replace(variable.first, _replacements); } break; } default: { // ignore all other types of nodes } } // always continue return false; } private: std::unordered_map<VariableId, Variable const*> const& _replacements; }; //////////////////////////////////////////////////////////////////////////////// /// @brief remove CalculationNode(s) that are repeatedly used in a query /// (i.e. 
common expressions) //////////////////////////////////////////////////////////////////////////////// void triagens::aql::removeRedundantCalculationsRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::CALCULATION, true)); if (nodes.size() < 2) { // quick exit opt->addPlan(plan, rule, false); return; } triagens::basics::StringBuffer buffer(TRI_UNKNOWN_MEM_ZONE); std::unordered_map<VariableId, Variable const*> replacements; for (auto const& n : nodes) { auto nn = static_cast<CalculationNode*>(n); if (! nn->expression()->isDeterministic()) { // If this node is non-deterministic, we must not touch it! continue; } auto outvar = n->getVariablesSetHere(); TRI_ASSERT(outvar.size() == 1); try { nn->expression()->stringifyIfNotTooLong(&buffer); } catch (...) { // expression could not be stringified (maybe because not all node types // are supported). this is not an error, we just skip the optimization buffer.reset(); continue; } std::string const referenceExpression(buffer.c_str(), buffer.length()); buffer.reset(); std::vector<ExecutionNode*> stack; n->addDependencies(stack); while (! stack.empty()) { auto current = stack.back(); stack.pop_back(); if (current->getType() == EN::CALCULATION) { try { static_cast<CalculationNode*>(current)->expression()->stringifyIfNotTooLong(&buffer); } catch (...) { // expression could not be stringified (maybe because not all node types // are supported). this is not an error, we just skip the optimization buffer.reset(); continue; } std::string const compareExpression(buffer.c_str(), buffer.length()); buffer.reset(); if (compareExpression == referenceExpression) { // expressions are identical auto outvars = current->getVariablesSetHere(); TRI_ASSERT(outvars.size() == 1); // check if target variable is already registered as a replacement // this covers the following case: // - replacements is set to B => C // - we're now inserting a replacement A => B // the goal now is to enter a replacement A => C instead of A => B auto target = outvars[0]; while (target != nullptr) { auto it = replacements.find(target->id); if (it != replacements.end()) { target = (*it).second; } else { break; } } replacements.emplace(outvar[0]->id, target); // also check if the insertion enables further shortcuts // this covers the following case: // - replacements is set to A => B // - we have just inserted a replacement B => C // the goal now is to change the replacement A => B to A => C for (auto it = replacements.begin(); it != replacements.end(); ++it) { if ((*it).second == outvar[0]) { (*it).second = target; } } } } if (current->getType() == EN::AGGREGATE) { if (static_cast<AggregateNode*>(current)->hasOutVariable()) { // COLLECT ... INTO is evil (tm): it needs to keep all already defined variables // we need to abort optimization here break; } } if (! current->hasDependency()) { // node either has no or more than one dependency. we don't know what to do and must abort // note: this will also handle Singleton nodes break; } current->addDependencies(stack); } } if (! 
replacements.empty()) { // finally replace the variables RedundantCalculationsReplacer finder(replacements); plan->root()->walk(&finder); opt->addPlan(plan, rule, true); } else { // no changes opt->addPlan(plan, rule, false); } } //////////////////////////////////////////////////////////////////////////////// /// @brief remove CalculationNodes and SubqueryNodes that are never needed /// this modifies an existing plan in place //////////////////////////////////////////////////////////////////////////////// void triagens::aql::removeUnnecessaryCalculationsRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { std::vector<ExecutionNode::NodeType> const types = { EN::CALCULATION, EN::SUBQUERY }; std::vector<ExecutionNode*> nodes(plan->findNodesOfType(types, true)); std::unordered_set<ExecutionNode*> toUnlink; for (auto const& n : nodes) { if (n->getType() == EN::CALCULATION) { auto nn = static_cast<CalculationNode*>(n); if (nn->canThrow() && ! nn->canRemoveIfThrows()) { // If this node can throw, we must not optimize it away! continue; } } else { auto nn = static_cast<SubqueryNode*>(n); if (nn->canThrow()) { // subqueries that can throw must not be optimized away continue; } if (nn->isModificationQuery()) { // subqueries that modify data must not be optimized away continue; } } auto outvar = n->getVariablesSetHere(); TRI_ASSERT(outvar.size() == 1); auto varsUsedLater = n->getVarsUsedLater(); if (varsUsedLater.find(outvar[0]) == varsUsedLater.end()) { // The variable whose value is calculated here is not used at // all further down the pipeline! We remove the whole // calculation node, toUnlink.emplace(n); } } if (! toUnlink.empty()) { plan->unlinkNodes(toUnlink); } opt->addPlan(plan, rule, ! toUnlink.empty()); } //////////////////////////////////////////////////////////////////////////////// /// @brief useIndex, try to use an index for filtering //////////////////////////////////////////////////////////////////////////////// void triagens::aql::useIndexesRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { // These are all the nodes where we start traversing (including all subqueries) std::vector<ExecutionNode*> nodes(plan->findEndNodes(true)); std::unordered_map<size_t, ExecutionNode*> changes; auto cleanupChanges = [&changes] () -> void { for (auto& v : changes) { delete v.second; } changes.clear(); }; TRI_DEFER(cleanupChanges()); bool hasEmptyResult = false; for (auto const& n : nodes) { ConditionFinder finder(plan, &changes, &hasEmptyResult); n->walk(&finder); } if (! changes.empty()) { for (auto& it : changes) { plan->registerNode(it.second); plan->replaceNode(plan->getNodeById(it.first), it.second); // prevent double deletion by cleanupChanges() it.second = nullptr; } opt->addPlan(plan, rule, true); } else { opt->addPlan(plan, rule, hasEmptyResult); } } struct SortToIndexNode final : public WalkerWorker<ExecutionNode> { ExecutionPlan* _plan; SortNode* _sortNode; std::vector<std::pair<VariableId, bool>> _sorts; std::unordered_map<VariableId, AstNode const*> _variableDefinitions; bool _modified; public: explicit SortToIndexNode (ExecutionPlan* plan) : _plan(plan), _sortNode(nullptr), _sorts(), _variableDefinitions(), _modified(false) { } bool handleEnumerateCollectionNode (EnumerateCollectionNode* enumerateCollectionNode) { if (_sortNode == nullptr) { return true; } if (enumerateCollectionNode->isInInnerLoop()) { // index node contained in an outer loop. must not optimize away the sort! 
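// (hedged example, hypothetical query: FOR x IN outer FOR y IN inner
//  SORT y.value RETURN y; the SORT orders the combined result across all
//  outer iterations, so index order on `inner` alone cannot replace it)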
return true; } SortCondition sortCondition(_sorts, _variableDefinitions); if (! sortCondition.isEmpty() && sortCondition.isOnlyAttributeAccess() && sortCondition.isUnidirectional()) { // we have found a sort condition, which is unidirectionl // now check if any of the collection's indexes covers it Variable const* outVariable = enumerateCollectionNode->outVariable(); auto const& indexes = enumerateCollectionNode->collection()->getIndexes(); triagens::aql::Index const* bestIndex = nullptr; double bestCost = 0.0; size_t bestNumCovered = 0; for (auto& index : indexes) { if (! index->isSorted() || index->sparse) { // can only use a sorted index // cannot use a sparse index for sorting continue; } auto numCovered = sortCondition.coveredAttributes(outVariable, index->fields); if (numCovered == 0) { continue; } double estimatedCost = 0.0; if (! index->supportsSortCondition(&sortCondition, outVariable, enumerateCollectionNode->collection()->count(), estimatedCost)) { // should never happen TRI_ASSERT(false); continue; } if (bestIndex == nullptr || estimatedCost < bestCost) { bestIndex = index; bestCost = estimatedCost; bestNumCovered = numCovered; } } if (bestIndex != nullptr) { auto condition = std::make_unique<Condition>(_plan->getAst()); condition->normalize(_plan); std::unique_ptr<ExecutionNode> newNode(new IndexNode( _plan, _plan->nextId(), enumerateCollectionNode->vocbase(), enumerateCollectionNode->collection(), outVariable, std::vector<Index const*>({ bestIndex }), condition.get(), sortCondition.isDescending() )); condition.release(); auto n = newNode.release(); _plan->registerNode(n); _plan->replaceNode(enumerateCollectionNode, n); _modified = true; if (bestNumCovered == sortCondition.numAttributes()) { // if the index covers the complete sort condition, we can also remove the sort node _plan->unlinkNode(_plan->getNodeById(_sortNode->id())); } } } return true; // always abort further searching here } bool handleIndexNode (IndexNode* indexNode) { if (_sortNode == nullptr) { return true; } if (indexNode->isInInnerLoop()) { // index node contained in an outer loop. must not optimize away the sort! return true; } auto const& indexes = indexNode->getIndexes(); auto cond = indexNode->condition(); if (indexes.size() != 1) { // can only use this index node if it uses exactly one index or multiple indexes on exactly the same attributes if (! cond->isSorted()) { // index conditions do not guarantee sortedness return true; } std::vector<std::vector<triagens::basics::AttributeName>> seen; for (auto& index : indexes) { if (index->sparse) { // cannot use a sparse index for sorting return true; } if (! seen.empty() && triagens::basics::AttributeName::isIdentical(index->fields, seen, true)) { // different attributes return true; } } // all indexes use the same attributes and index conditions guarantee sorted output } // if we get here, we either have one index or multiple indexes on the same attributes auto index = indexes[0]; bool handled = false; SortCondition sortCondition(_sorts, _variableDefinitions); bool const isOnlyAttributeAccess = (! sortCondition.isEmpty() && sortCondition.isOnlyAttributeAccess()); if (isOnlyAttributeAccess && index->isSorted() && ! index->sparse && sortCondition.isUnidirectional() && sortCondition.isDescending() == indexNode->reverse()) { // we have found a sort condition, which is unidirectional and in the same // order as the IndexNode... 
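// (hedged example: given a sorted index on ["a", "b"] and a query doing
//  SORT doc.a, doc.b, the index scan already emits documents in the
//  requested order, so the SortNode can be removed)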
// now check if the sort attributes match the ones of the index Variable const* outVariable = indexNode->outVariable(); auto numCovered = sortCondition.coveredAttributes(outVariable, index->fields); if (numCovered == sortCondition.numAttributes()) { // sort condition is fully covered by index... now we can remove the sort node from the plan _plan->unlinkNode(_plan->getNodeById(_sortNode->id())); _modified = true; handled = true; } } if (! handled && isOnlyAttributeAccess && indexes.size() == 1) { // special case... the index cannot be used for sorting, but we only compare with equality // lookups. now check if the equality lookup attributes are the same as the index attributes auto root = cond->root(); if (root != nullptr) { auto condNode = root->getMember(0); if (condNode->isOnlyEqualityMatch()) { // now check if the index fields are the same as the sort condition fields // e.g. FILTER c.value1 == 1 && c.value2 == 42 SORT c.value1, c.value2 Variable const* outVariable = indexNode->outVariable(); size_t coveredFields = sortCondition.coveredAttributes(outVariable, index->fields); if (coveredFields == sortCondition.numAttributes() && (index->isSorted() || index->fields.size() == sortCondition.numAttributes())) { // no need to sort _plan->unlinkNode(_plan->getNodeById(_sortNode->id())); _modified = true; } } } } return true; // always abort after we found an IndexNode } bool enterSubquery (ExecutionNode*, ExecutionNode*) override final { return false; } bool before (ExecutionNode* en) override final { switch (en->getType()) { case EN::TRAVERSAL: case EN::ENUMERATE_LIST: case EN::SUBQUERY: case EN::FILTER: return false; // skip. we don't care. case EN::CALCULATION: { auto outvars = en->getVariablesSetHere(); TRI_ASSERT(outvars.size() == 1); _variableDefinitions.emplace(outvars[0]->id, static_cast<CalculationNode const*>(en)->expression()->node()); return false; } case EN::SINGLETON: case EN::AGGREGATE: case EN::INSERT: case EN::REMOVE: case EN::REPLACE: case EN::UPDATE: case EN::UPSERT: case EN::RETURN: case EN::NORESULTS: case EN::SCATTER: case EN::DISTRIBUTE: case EN::GATHER: case EN::REMOTE: case EN::ILLEGAL: case EN::LIMIT: // LIMIT is criterion to stop return true; // abort. case EN::SORT: // pulling two sorts together is done elsewhere. if (! _sorts.empty() || _sortNode != nullptr) { return true; // a different SORT node. 
abort } _sortNode = static_cast<SortNode*>(en); for (auto& it : _sortNode->getElements()) { _sorts.emplace_back((it.first)->id, it.second); } return false; case EN::INDEX: return handleIndexNode(static_cast<IndexNode*>(en)); case EN::ENUMERATE_COLLECTION: return handleEnumerateCollectionNode(static_cast<EnumerateCollectionNode*>(en)); } return true; } }; void triagens::aql::useIndexForSortRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { bool modified = false; std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::SORT, true)); for (auto const& n : nodes) { auto sortNode = static_cast<SortNode*>(n); SortToIndexNode finder(plan); sortNode->walk(&finder); if (finder._modified) { modified = true; } } opt->addPlan(plan, rule, modified); } //////////////////////////////////////////////////////////////////////////////// /// @brief try to remove filters which are covered by indexes //////////////////////////////////////////////////////////////////////////////// void triagens::aql::removeFiltersCoveredByIndexRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { std::unordered_set<ExecutionNode*> toUnlink; bool modified = false; std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::FILTER, true)); for (auto const& node : nodes) { auto fn = static_cast<FilterNode const*>(node); // find the node with the filter expression auto inVar = fn->getVariablesUsedHere(); TRI_ASSERT(inVar.size() == 1); auto setter = plan->getVarSetBy(inVar[0]->id); if (setter == nullptr || setter->getType() != EN::CALCULATION) { continue; } auto calculationNode = static_cast<CalculationNode*>(setter); auto conditionNode = calculationNode->expression()->node(); // build the filter condition auto condition = std::make_unique<Condition>(plan->getAst()); condition->andCombine(conditionNode); condition->normalize(plan); if (condition->root() == nullptr) { continue; } size_t const n = condition->root()->numMembers(); if (n != 1) { // either no condition or multiple ORed conditions... continue; } bool handled = false; auto current = node; while (current != nullptr) { if (current->getType() == EN::INDEX) { auto indexNode = static_cast<IndexNode const*>(current); // found an index node, now check if the expression is covered by the index auto indexCondition = indexNode->condition(); if (indexCondition != nullptr && ! indexCondition->isEmpty()) { auto const& indexesUsed = indexNode->getIndexes(); if (indexesUsed.size() == 1) { // single index. this is something that we can handle auto newNode = condition->removeIndexCondition(indexNode->outVariable(), indexCondition->root()); if (newNode == nullptr) { // no condition left... // FILTER node can be completely removed toUnlink.emplace(setter); toUnlink.emplace(node); modified = true; handled = true; } else if (newNode != condition->root()) { // some condition is left, but it is a different one than // the one from the FILTER node auto expr = std::make_unique<Expression>(plan->getAst(), newNode); CalculationNode* cn = new CalculationNode(plan, plan->nextId(), expr.get(), calculationNode->outVariable()); expr.release(); plan->registerNode(cn); plan->replaceNode(setter, cn); modified = true; handled = true; } } } if (handled) { break; } } if (handled || current->getType() == EN::LIMIT || ! current->hasDependency()) { break; } current = current->getFirstDependency(); } } if (! 
toUnlink.empty()) { plan->unlinkNodes(toUnlink); } opt->addPlan(plan, rule, modified); } //////////////////////////////////////////////////////////////////////////////// /// @brief helper to compute lots of permutation tuples /// a permutation tuple is represented as a single vector together with /// another vector describing the boundaries of the tuples. /// Example: /// data: 0,1,2, 3,4, 5,6 /// starts: 0, 3, 5, (indices of starts of sections) /// means a tuple of 3 permutations of 3, 2 and 2 points respectively /// This function computes the next permutation tuple among the /// lexicographically sorted list of all such tuples. It returns true /// if it has successfully computed this and false if the tuple is already /// the lexicographically largest one. If false is returned, the permutation /// tuple is back to the beginning. //////////////////////////////////////////////////////////////////////////////// static bool NextPermutationTuple (std::vector<size_t>& data, std::vector<size_t>& starts) { auto begin = data.begin(); // a random access iterator for (size_t i = starts.size(); i-- != 0; ) { std::vector<size_t>::iterator from = begin + starts[i]; std::vector<size_t>::iterator to; if (i == starts.size() - 1) { to = data.end(); } else { to = begin + starts[i + 1]; } if (std::next_permutation(from, to)) { return true; } } return false; } //////////////////////////////////////////////////////////////////////////////// /// @brief interchange adjacent EnumerateCollectionNodes in all possible ways //////////////////////////////////////////////////////////////////////////////// void triagens::aql::interchangeAdjacentEnumerationsRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::ENUMERATE_COLLECTION, true)); std::unordered_set<ExecutionNode*> nodesSet; for (auto const& n : nodes) { TRI_ASSERT(nodesSet.find(n) == nodesSet.end()); nodesSet.emplace(n); } std::vector<ExecutionNode*> nodesToPermute; std::vector<size_t> permTuple; std::vector<size_t> starts; // We use that the order of the nodes is such that a node B that is among the // recursive dependencies of a node A is later in the vector. for (auto const& n : nodes) { if (nodesSet.find(n) != nodesSet.end()) { std::vector<ExecutionNode*> nn{ n }; nodesSet.erase(n); // Now follow the dependencies as long as we see further such nodes: auto nwalker = n; while (true) { if (! nwalker->hasDependency()) { break; } auto dep = nwalker->getFirstDependency(); if (dep->getType() != EN::ENUMERATE_COLLECTION) { break; } nwalker = dep; nn.emplace_back(nwalker); nodesSet.erase(nwalker); } if (nn.size() > 1) { // Move it into the permutation tuple: starts.emplace_back(permTuple.size()); for (auto const& nnn : nn) { nodesToPermute.emplace_back(nnn); permTuple.emplace_back(permTuple.size()); } } } } // Now we have collected all the runs of EnumerateCollectionNodes in the // plan, we need to compute all possible permutations of all of them, // independently. This is why we need to compute all permutation tuples. opt->addPlan(plan, rule, false); if (! 
starts.empty()) { NextPermutationTuple(permTuple, starts); // will never return false do { // Clone the plan: auto newPlan = plan->clone(); try { // get rid of plan if any of this fails // Find the nodes in the new plan corresponding to the ones in the // old plan that we want to permute: std::vector<ExecutionNode*> newNodes; for (size_t j = 0; j < nodesToPermute.size(); j++) { newNodes.emplace_back(newPlan->getNodeById(nodesToPermute[j]->id())); } // Now get going with the permutations: for (size_t i = 0; i < starts.size(); i++) { size_t lowBound = starts[i]; size_t highBound = (i < starts.size()-1) ? starts[i+1] : permTuple.size(); // We need to remove the nodes // newNodes[lowBound..highBound-1] in newPlan and replace // them by the same ones in a different order, given by // permTuple[lowBound..highBound-1]. auto const& parents = newNodes[lowBound]->getParents(); TRI_ASSERT(parents.size() == 1); auto parent = parents[0]; // needed for insertion later // Unlink all those nodes: for (size_t j = lowBound; j < highBound; j++) { newPlan->unlinkNode(newNodes[j]); } // And insert them in the new order: for (size_t j = highBound; j-- != lowBound; ) { newPlan->insertDependency(parent, newNodes[permTuple[j]]); } } // OK, the new plan is ready, let's report it: if (! opt->addPlan(newPlan, rule, true)) { // have enough plans. stop permutations break; } } catch (...) { delete newPlan; throw; } } while (NextPermutationTuple(permTuple, starts)); } } //////////////////////////////////////////////////////////////////////////////// /// @brief scatter operations in cluster /// this rule inserts scatter, gather and remote nodes so operations on sharded /// collections actually work /// it will change plans in place //////////////////////////////////////////////////////////////////////////////// void triagens::aql::scatterInClusterRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { bool wasModified = false; if (triagens::arango::ServerState::instance()->isCoordinator()) { // find subqueries std::unordered_map<ExecutionNode*, ExecutionNode*> subqueries; for (auto& it : plan->findNodesOfType(ExecutionNode::SUBQUERY, true)) { subqueries.emplace(static_cast<SubqueryNode const*>(it)->getSubquery(), it); } // we are a coordinator. now look in the plan for nodes of type // EnumerateCollectionNode, IndexNode and modification nodes std::vector<ExecutionNode::NodeType> const types = { ExecutionNode::ENUMERATE_COLLECTION, ExecutionNode::INDEX, ExecutionNode::INSERT, ExecutionNode::UPDATE, ExecutionNode::REPLACE, ExecutionNode::REMOVE, ExecutionNode::UPSERT // TODO: check if ok here }; std::vector<ExecutionNode*> nodes(plan->findNodesOfType(types, true)); for (auto& node: nodes) { // found a node we need to replace in the plan auto const& parents = node->getParents(); auto const& deps = node->getDependencies(); TRI_ASSERT(deps.size() == 1); // don't do this if we are already distributing! 
if (deps[0]->getType() == ExecutionNode::REMOTE && deps[0]->getFirstDependency()->getType() == ExecutionNode::DISTRIBUTE) { continue; } bool const isRootNode = plan->isRoot(node); plan->unlinkNode(node, true); auto const nodeType = node->getType(); // extract database and collection from plan node TRI_vocbase_t* vocbase = nullptr; Collection const* collection = nullptr; if (nodeType == ExecutionNode::ENUMERATE_COLLECTION) { vocbase = static_cast<EnumerateCollectionNode*>(node)->vocbase(); collection = static_cast<EnumerateCollectionNode*>(node)->collection(); } else if (nodeType == ExecutionNode::INDEX) { vocbase = static_cast<IndexNode*>(node)->vocbase(); collection = static_cast<IndexNode*>(node)->collection(); } else if (nodeType == ExecutionNode::INSERT || nodeType == ExecutionNode::UPDATE || nodeType == ExecutionNode::REPLACE || nodeType == ExecutionNode::REMOVE || nodeType == ExecutionNode::UPSERT) { vocbase = static_cast<ModificationNode*>(node)->vocbase(); collection = static_cast<ModificationNode*>(node)->collection(); if (nodeType == ExecutionNode::REMOVE || nodeType == ExecutionNode::UPDATE) { // Note that in the REPLACE or UPSERT case we are not getting here, since // the distributeInClusterRule fires and a DistributeNode is // used. auto* modNode = static_cast<ModificationNode*>(node); modNode->getOptions().ignoreDocumentNotFound = true; } } else { TRI_ASSERT(false); } // insert a scatter node ExecutionNode* scatterNode = new ScatterNode(plan, plan->nextId(), vocbase, collection); plan->registerNode(scatterNode); scatterNode->addDependency(deps[0]); // insert a remote node ExecutionNode* remoteNode = new RemoteNode(plan, plan->nextId(), vocbase, collection, "", "", ""); plan->registerNode(remoteNode); remoteNode->addDependency(scatterNode); // re-link with the remote node node->addDependency(remoteNode); // insert another remote node remoteNode = new RemoteNode(plan, plan->nextId(), vocbase, collection, "", "", ""); plan->registerNode(remoteNode); remoteNode->addDependency(node); // insert a gather node ExecutionNode* gatherNode = new GatherNode(plan, plan->nextId(), vocbase, collection); plan->registerNode(gatherNode); gatherNode->addDependency(remoteNode); // and now link the gather node with the rest of the plan if (parents.size() == 1) { parents[0]->replaceDependency(deps[0], gatherNode); } // check if the node that we modified was at the end of a subquery auto it = subqueries.find(node); if (it != subqueries.end()) { static_cast<SubqueryNode*>((*it).second)->setSubquery(gatherNode, true); } if (isRootNode) { // if we replaced the root node, set a new root node plan->root(gatherNode); } wasModified = true; } } opt->addPlan(plan, rule, wasModified); } //////////////////////////////////////////////////////////////////////////////// /// @brief distribute operations in cluster /// /// this rule inserts distribute, remote nodes so operations on sharded /// collections actually work, this differs from scatterInCluster in that every /// incoming row is only sent to one shard and not all as in scatterInCluster /// /// it will change plans in place //////////////////////////////////////////////////////////////////////////////// void triagens::aql::distributeInClusterRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { bool wasModified = false; if (triagens::arango::ServerState::instance()->isCoordinator()) { // we are a coordinator, we replace the root if it is a modification node // only replace if it is the last node in the plan auto node = plan->root();
TRI_ASSERT(node != nullptr); while (node != nullptr) { // loop until we find a modification node or the end of the plan auto nodeType = node->getType(); if (nodeType == ExecutionNode::INSERT || nodeType == ExecutionNode::REMOVE || nodeType == ExecutionNode::UPDATE || nodeType == ExecutionNode::REPLACE || nodeType == ExecutionNode::UPSERT) { // found a node! break; } if (! node->hasDependency()) { // reached the end opt->addPlan(plan, rule, wasModified); return; } node = node->getFirstDependency(); } TRI_ASSERT(node != nullptr); if (node == nullptr) { THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "logic error"); } ExecutionNode* originalParent = nullptr; { if (node->hasParent()) { auto const& parents = node->getParents(); originalParent = parents[0]; TRI_ASSERT(originalParent != nullptr); TRI_ASSERT(node != plan->root()); } else { TRI_ASSERT(node == plan->root()); } } // when we get here, we have found a matching data-modification node! auto const nodeType = node->getType(); TRI_ASSERT(nodeType == ExecutionNode::INSERT || nodeType == ExecutionNode::REMOVE || nodeType == ExecutionNode::UPDATE || nodeType == ExecutionNode::REPLACE || nodeType == ExecutionNode::UPSERT); Collection const* collection = static_cast<ModificationNode*>(node)->collection(); bool const defaultSharding = collection->usesDefaultSharding(); if (nodeType == ExecutionNode::REMOVE || nodeType == ExecutionNode::UPDATE) { if (! defaultSharding) { // We have to use a ScatterNode. opt->addPlan(plan, rule, wasModified); return; } } // In the INSERT and REPLACE cases we use a DistributeNode... TRI_ASSERT(node->hasDependency()); auto const& deps = node->getDependencies(); if (originalParent != nullptr) { originalParent->removeDependency(node); // unlink the node auto root = plan->root(); plan->unlinkNode(node, true); plan->root(root, true); // fix root node } else { // unlink the node plan->unlinkNode(node, true); plan->root(deps[0], true); // fix root node } // extract database from plan node TRI_vocbase_t* vocbase = static_cast<ModificationNode*>(node)->vocbase(); // insert a distribute node ExecutionNode* distNode = nullptr; Variable const* inputVariable; if (nodeType == ExecutionNode::INSERT || nodeType == ExecutionNode::REMOVE) { TRI_ASSERT(node->getVariablesUsedHere().size() == 1); // in case of an INSERT, the DistributeNode is responsible for generating keys // if none present bool const createKeys = (nodeType == ExecutionNode::INSERT); inputVariable = node->getVariablesUsedHere()[0]; distNode = new DistributeNode(plan, plan->nextId(), vocbase, collection, inputVariable->id, createKeys, true); } else if (nodeType == ExecutionNode::REPLACE) { std::vector<Variable const*> v = node->getVariablesUsedHere(); if (defaultSharding && v.size() > 1) { // We only look into _inKeyVariable inputVariable = v[1]; } else { // We only look into _inDocVariable inputVariable = v[0]; } distNode = new DistributeNode(plan, plan->nextId(), vocbase, collection, inputVariable->id, false, v.size() > 1); } else if (nodeType == ExecutionNode::UPDATE) { std::vector<Variable const*> v = node->getVariablesUsedHere(); if (v.size() > 1) { // If there is a key variable: inputVariable = v[1]; // This is the _inKeyVariable! This works, since we use a ScatterNode // for non-default-sharding attributes. 
} else { // was only UPDATE <doc> IN <collection> inputVariable = v[0]; } distNode = new DistributeNode(plan, plan->nextId(), vocbase, collection, inputVariable->id, false, v.size() > 1); } else if (nodeType == ExecutionNode::UPSERT) { // an UPSERT node has multiple input variables! std::vector<Variable const*> v(node->getVariablesUsedHere()); TRI_ASSERT(v.size() >= 3); distNode = new DistributeNode(plan, plan->nextId(), vocbase, collection, v[0]->id, v[2]->id, false, true); } else { TRI_ASSERT(false); THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "logic error"); } TRI_ASSERT(distNode != nullptr); plan->registerNode(distNode); distNode->addDependency(deps[0]); // insert a remote node ExecutionNode* remoteNode = new RemoteNode(plan, plan->nextId(), vocbase, collection, "", "", ""); plan->registerNode(remoteNode); remoteNode->addDependency(distNode); // re-link with the remote node node->addDependency(remoteNode); // insert another remote node remoteNode = new RemoteNode(plan, plan->nextId(), vocbase, collection, "", "", ""); plan->registerNode(remoteNode); remoteNode->addDependency(node); // insert a gather node ExecutionNode* gatherNode = new GatherNode(plan, plan->nextId(), vocbase, collection); plan->registerNode(gatherNode); gatherNode->addDependency(remoteNode); if (originalParent != nullptr) { // we did not replace the root node originalParent->addDependency(gatherNode); } else { // we replaced the root node, set a new root node plan->root(gatherNode, true); } wasModified = true; } opt->addPlan(plan, rule, wasModified); } //////////////////////////////////////////////////////////////////////////////// /// @brief move filters up into the cluster distribution part of the plan /// this rule modifies the plan in place /// filters are moved as far up in the plan as possible to make result sets /// as small as possible as early as possible //////////////////////////////////////////////////////////////////////////////// void triagens::aql::distributeFilternCalcToClusterRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { bool modified = false; std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::GATHER, true)); for (auto& n : nodes) { auto const& remoteNodeList = n->getDependencies(); TRI_ASSERT(remoteNodeList.size() > 0); auto rn = remoteNodeList[0]; if (! n->hasParent()) { continue; } std::unordered_set<Variable const*> varsSetHere; auto parents = n->getParents(); while (true) { bool stopSearching = false; auto inspectNode = parents[0]; switch (inspectNode->getType()) { case EN::ENUMERATE_LIST: case EN::SINGLETON: case EN::INSERT: case EN::REMOVE: case EN::REPLACE: case EN::UPDATE: case EN::UPSERT: { for (auto& v : inspectNode->getVariablesSetHere()) { varsSetHere.emplace(v); } parents = inspectNode->getParents(); continue; } case EN::AGGREGATE: case EN::SUBQUERY: case EN::RETURN: case EN::NORESULTS: case EN::SCATTER: case EN::DISTRIBUTE: case EN::GATHER: case EN::ILLEGAL: case EN::REMOTE: case EN::LIMIT: case EN::SORT: case EN::INDEX: case EN::ENUMERATE_COLLECTION: case EN::TRAVERSAL: //do break stopSearching = true; break; case EN::CALCULATION: { auto calc = static_cast<CalculationNode const*>(inspectNode); // check if the expression can be executed on a DB server safely if (!
calc->expression()->canRunOnDBServer()) { stopSearching = true; break; } // intentionally fall through here } case EN::FILTER: for (auto& v : inspectNode->getVariablesUsedHere()) { if (varsSetHere.find(v) != varsSetHere.end()) { // do not move over the definition of variables that we need stopSearching = true; break; } } if (! stopSearching) { // remember our cursor... parents = inspectNode->getParents(); // then unlink the filter/calculator from the plan plan->unlinkNode(inspectNode); // and re-insert into plan in front of the remoteNode plan->insertDependency(rn, inspectNode); modified = true; //ready to rumble! } break; } if (stopSearching) { break; } } } opt->addPlan(plan, rule, modified); } //////////////////////////////////////////////////////////////////////////////// /// @brief move sorts up into the cluster distribution part of the plan /// this rule modifies the plan in place /// sorts are moved as far up in the plan as possible to make result sets /// as small as possible as early as possible /// /// filters are not pushed beyond limits //////////////////////////////////////////////////////////////////////////////// void triagens::aql::distributeSortToClusterRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { bool modified = false; std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::GATHER, true)); for (auto& n : nodes) { auto const& remoteNodeList = n->getDependencies(); auto gatherNode = static_cast<GatherNode*>(n); TRI_ASSERT(remoteNodeList.size() > 0); auto rn = remoteNodeList[0]; if (! n->hasParent()) { continue; } auto parents = n->getParents(); while (1) { bool stopSearching = false; auto inspectNode = parents[0]; switch (inspectNode->getType()) { case EN::ENUMERATE_LIST: case EN::SINGLETON: case EN::AGGREGATE: case EN::INSERT: case EN::REMOVE: case EN::REPLACE: case EN::UPDATE: case EN::UPSERT: case EN::CALCULATION: case EN::FILTER: case EN::SUBQUERY: case EN::RETURN: case EN::NORESULTS: case EN::SCATTER: case EN::DISTRIBUTE: case EN::GATHER: case EN::ILLEGAL: case EN::REMOTE: case EN::LIMIT: case EN::INDEX: case EN::TRAVERSAL: case EN::ENUMERATE_COLLECTION: // For all these, we do not want to pull a SortNode further down // out to the DBservers, note that potential FilterNodes and // CalculationNodes that can be moved to the DBservers have // already been moved over by the distribute-filtercalc-to-cluster // rule which is done first. stopSearching = true; break; case EN::SORT: auto thisSortNode = static_cast<SortNode*>(inspectNode); // remember our cursor... parents = inspectNode->getParents(); // then unlink the filter/calculator from the plan plan->unlinkNode(inspectNode); // and re-insert into plan in front of the remoteNode plan->insertDependency(rn, inspectNode); gatherNode->setElements(thisSortNode->getElements()); modified = true; //ready to rumble! 
} if (stopSearching) { break; } } } opt->addPlan(plan, rule, modified); } //////////////////////////////////////////////////////////////////////////////// /// @brief try to get rid of a RemoteNode->ScatterNode combination which has /// only a SingletonNode and possibly some CalculationNodes as dependencies //////////////////////////////////////////////////////////////////////////////// void triagens::aql::removeUnnecessaryRemoteScatterRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::REMOTE, true)); std::unordered_set<ExecutionNode*> toUnlink; for (auto& n : nodes) { // check if the remote node is preceded by a scatter node and any number of // calculation and singleton nodes. if yes, remove remote and scatter if (! n->hasDependency()) { continue; } auto const dep = n->getFirstDependency(); if (dep->getType() != EN::SCATTER) { continue; } bool canOptimize = true; auto node = dep; while (node != nullptr) { auto const& d = node->getDependencies(); if (d.size() != 1) { break; } node = d[0]; if (node->getType() != EN::SINGLETON && node->getType() != EN::CALCULATION) { // found some other node type... // this disqualifies the optimization canOptimize = false; break; } if (node->getType() == EN::CALCULATION) { auto calc = static_cast<CalculationNode const*>(node); // check if the expression can be executed on a DB server safely if (! calc->expression()->canRunOnDBServer()) { canOptimize = false; break; } } } if (canOptimize) { toUnlink.emplace(n); toUnlink.emplace(dep); } } if (! toUnlink.empty()) { plan->unlinkNodes(toUnlink); } opt->addPlan(plan, rule, ! toUnlink.empty()); } //////////////////////////////////////////////////////////////////////////////// /// WalkerWorker for undistributeRemoveAfterEnumColl //////////////////////////////////////////////////////////////////////////////// class RemoveToEnumCollFinder final : public WalkerWorker<ExecutionNode> { ExecutionPlan* _plan; std::unordered_set<ExecutionNode*>& _toUnlink; bool _remove; bool _scatter; bool _gather; EnumerateCollectionNode* _enumColl; ExecutionNode* _setter; const Variable* _variable; ExecutionNode* _lastNode; public: RemoveToEnumCollFinder (ExecutionPlan* plan, std::unordered_set<ExecutionNode*>& toUnlink) : _plan(plan), _toUnlink(toUnlink), _remove(false), _scatter(false), _gather(false), _enumColl(nullptr), _setter(nullptr), _variable(nullptr), _lastNode(nullptr) { }; ~RemoveToEnumCollFinder () { } bool before (ExecutionNode* en) override final { switch (en->getType()) { case EN::REMOVE: { if (_remove) { break; } // find the variable we are removing . . . auto rn = static_cast<RemoveNode*>(en); auto varsToRemove = rn->getVariablesUsedHere(); // remove nodes always have one input variable TRI_ASSERT(varsToRemove.size() == 1); _setter = _plan->getVarSetBy(varsToRemove[0]->id); TRI_ASSERT(_setter != nullptr); auto enumColl = _setter; if (_setter->getType() == EN::CALCULATION) { // this should be an attribute access for _key auto cn = static_cast<CalculationNode*>(_setter); if (! cn->expression()->isAttributeAccess()) { break; // abort . . . } // check the variable is the same as the remove variable auto vars = cn->getVariablesSetHere(); if (vars.size() != 1 || vars[0]->id != varsToRemove[0]->id) { break; // abort . . . } // check the remove node's collection is sharded over _key std::vector<std::string> shardKeys = rn->collection()->shardKeys(); if (shardKeys.size() != 1 || shardKeys[0] != TRI_VOC_ATTRIBUTE_KEY) { break; // abort . . .
} // set the varsToRemove to the variable in the expression of this // node and also define enumColl varsToRemove = cn->getVariablesUsedHere(); TRI_ASSERT(varsToRemove.size() == 1); enumColl = _plan->getVarSetBy(varsToRemove[0]->id); TRI_ASSERT(_setter != nullptr); } if (enumColl->getType() != EN::ENUMERATE_COLLECTION) { break; // abort . . . } _enumColl = static_cast<EnumerateCollectionNode*>(enumColl); if (_enumColl->collection() != rn->collection()) { break; // abort . . . } _variable = varsToRemove[0]; // the variable we'll remove _remove = true; _lastNode = en; return false; // continue . . . } case EN::REMOTE: { _toUnlink.emplace(en); _lastNode = en; return false; // continue . . . } case EN::DISTRIBUTE: case EN::SCATTER: { if (_scatter) { // met more than one scatter node break; // abort . . . } _scatter = true; _toUnlink.emplace(en); _lastNode = en; return false; // continue . . . } case EN::GATHER: { if (_gather) { // met more than one gather node break; // abort . . . } _gather = true; _toUnlink.emplace(en); _lastNode = en; return false; // continue . . . } case EN::FILTER: { _lastNode = en; return false; // continue . . . } case EN::CALCULATION: { TRI_ASSERT(_setter != nullptr); if (_setter->getType() == EN::CALCULATION && _setter->id() == en->id()) { _lastNode = en; return false; // continue . . . } if (_lastNode == nullptr || _lastNode->getType() != EN::FILTER) { // doesn't match the last filter node break; // abort . . . } auto cn = static_cast<CalculationNode*>(en); auto fn = static_cast<FilterNode*>(_lastNode); // check these are a Calc-Filter pair if (cn->getVariablesSetHere()[0]->id != fn->getVariablesUsedHere()[0]->id) { break; // abort . . . } // check that we are filtering/calculating something with the variable // we are to remove auto varsUsedHere = cn->getVariablesUsedHere(); if (varsUsedHere.size() != 1) { break; //abort . . . } if (varsUsedHere[0]->id != _variable->id) { break; } _lastNode = en; return false; // continue . . . } case EN::ENUMERATE_COLLECTION: { // check that we are enumerating the variable we are to remove // and that we have already seen a remove node TRI_ASSERT(_enumColl != nullptr); if (en->id() != _enumColl->id()) { break; } return true; // reached the end! } case EN::SINGLETON: case EN::ENUMERATE_LIST: case EN::SUBQUERY: case EN::AGGREGATE: case EN::INSERT: case EN::REPLACE: case EN::UPDATE: case EN::UPSERT: case EN::RETURN: case EN::NORESULTS: case EN::ILLEGAL: case EN::LIMIT: case EN::SORT: case EN::TRAVERSAL: case EN::INDEX: { // if we meet any of the above, then we abort . . . } } _toUnlink.clear(); return true; } }; //////////////////////////////////////////////////////////////////////////////// /// @brief recognizes that a RemoveNode can be moved to the shards. //////////////////////////////////////////////////////////////////////////////// void triagens::aql::undistributeRemoveAfterEnumCollRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::REMOVE, true)); std::unordered_set<ExecutionNode*> toUnlink; for (auto& n : nodes) { RemoveToEnumCollFinder finder(plan, toUnlink); n->walk(&finder); } bool modified = false; if (! 
toUnlink.empty()) { plan->unlinkNodes(toUnlink); modified = true; } opt->addPlan(plan, rule, modified); } //////////////////////////////////////////////////////////////////////////////// /// @brief auxiliary struct for finding common nodes in OR conditions //////////////////////////////////////////////////////////////////////////////// struct CommonNodeFinder { std::vector<AstNode const*> possibleNodes; bool find (AstNode const* node, AstNodeType condition, AstNode const*& commonNode, std::string& commonName) { if (node->type == NODE_TYPE_OPERATOR_BINARY_OR) { return (find(node->getMember(0), condition, commonNode, commonName) && find(node->getMember(1), condition, commonNode, commonName)); } if (node->type == NODE_TYPE_VALUE) { possibleNodes.clear(); return true; } if (node->type == condition || (condition != NODE_TYPE_OPERATOR_BINARY_EQ && ( node->type == NODE_TYPE_OPERATOR_BINARY_LE || node->type == NODE_TYPE_OPERATOR_BINARY_LT || node->type == NODE_TYPE_OPERATOR_BINARY_GE || node->type == NODE_TYPE_OPERATOR_BINARY_GT || node->type == NODE_TYPE_OPERATOR_BINARY_IN))) { auto lhs = node->getMember(0); auto rhs = node->getMember(1); bool const isIn = (node->type == NODE_TYPE_OPERATOR_BINARY_IN && rhs->isArray()); if (node->type == NODE_TYPE_OPERATOR_BINARY_IN && rhs->type == NODE_TYPE_EXPANSION) { // ooh, cannot optimize this (yet) possibleNodes.clear(); return false; } if (! isIn && lhs->isConstant()) { commonNode = rhs; commonName = commonNode->toString(); possibleNodes.clear(); return true; } if (rhs->isConstant()) { commonNode = lhs; commonName = commonNode->toString(); possibleNodes.clear(); return true; } if (rhs->type == NODE_TYPE_FCALL || rhs->type == NODE_TYPE_FCALL_USER || rhs->type == NODE_TYPE_REFERENCE) { commonNode = lhs; commonName = commonNode->toString(); possibleNodes.clear(); return true; } if (! isIn && (lhs->type == NODE_TYPE_FCALL || lhs->type == NODE_TYPE_FCALL_USER || lhs->type == NODE_TYPE_REFERENCE)) { commonNode = rhs; commonName = commonNode->toString(); possibleNodes.clear(); return true; } if (! isIn && (lhs->type == NODE_TYPE_ATTRIBUTE_ACCESS || lhs->type == NODE_TYPE_INDEXED_ACCESS)) { if (possibleNodes.size() == 2) { for (size_t i = 0; i < 2; i++) { if (lhs->toString() == possibleNodes[i]->toString()) { commonNode = possibleNodes[i]; commonName = commonNode->toString(); possibleNodes.clear(); return true; } } // don't return, must consider the other side of the condition } else { possibleNodes.emplace_back(lhs); } } if (rhs->type == NODE_TYPE_ATTRIBUTE_ACCESS || rhs->type == NODE_TYPE_INDEXED_ACCESS) { if (possibleNodes.size() == 2) { for (size_t i = 0; i < 2; i++) { if (rhs->toString() == possibleNodes[i]->toString()) { commonNode = possibleNodes[i]; commonName = commonNode->toString(); possibleNodes.clear(); return true; } } return false; } else { possibleNodes.emplace_back(rhs); return true; } } } possibleNodes.clear(); return (!
commonName.empty()); } }; //////////////////////////////////////////////////////////////////////////////// /// @brief auxiliary struct for the OR-to-IN conversion //////////////////////////////////////////////////////////////////////////////// struct OrToInConverter { std::vector<AstNode const*> valueNodes; CommonNodeFinder finder; AstNode const* commonNode = nullptr; std::string commonName; AstNode* buildInExpression (Ast* ast) { // the list of comparison values auto list = ast->createNodeArray(); for (auto& x : valueNodes) { list->addMember(x); } // return a new IN operator node return ast->createNodeBinaryOperator(NODE_TYPE_OPERATOR_BINARY_IN, commonNode->clone(ast), list); } bool canConvertExpression (AstNode const* node) { if (finder.find(node, NODE_TYPE_OPERATOR_BINARY_EQ, commonNode, commonName)) { return canConvertExpressionWalker(node); } else if (finder.find(node, NODE_TYPE_OPERATOR_BINARY_IN, commonNode, commonName)) { return canConvertExpressionWalker(node); } return false; } bool canConvertExpressionWalker (AstNode const* node) { if (node->type == NODE_TYPE_OPERATOR_BINARY_OR) { return (canConvertExpressionWalker(node->getMember(0)) && canConvertExpressionWalker(node->getMember(1))); } if (node->type == NODE_TYPE_OPERATOR_BINARY_EQ) { auto lhs = node->getMember(0); auto rhs = node->getMember(1); if (canConvertExpressionWalker(rhs) && ! canConvertExpressionWalker(lhs)) { valueNodes.emplace_back(lhs); return true; } if (canConvertExpressionWalker(lhs) && ! canConvertExpressionWalker(rhs)) { valueNodes.emplace_back(rhs); return true; } // if canConvertExpressionWalker(lhs) and canConvertExpressionWalker(rhs), then one of // the equalities in the OR statement is of the form x == x // fall-through intentional } else if (node->type == NODE_TYPE_OPERATOR_BINARY_IN) { auto lhs = node->getMember(0); auto rhs = node->getMember(1); if (canConvertExpressionWalker(lhs) && ! canConvertExpressionWalker(rhs) && rhs->isArray()) { size_t const n = rhs->numMembers(); for (size_t i = 0; i < n; ++i) { valueNodes.emplace_back(rhs->getMemberUnchecked(i)); } return true; } // fall-through intentional } else if (node->type == NODE_TYPE_REFERENCE || node->type == NODE_TYPE_ATTRIBUTE_ACCESS || node->type == NODE_TYPE_INDEXED_ACCESS) { // get a string representation of the node for comparisons return (node->toString() == commonName); } return false; } }; //////////////////////////////////////////////////////////////////////////////// /// @brief this rule replaces expressions of the type: /// x.val == 1 || x.val == 2 || x.val == 3 /// with /// x.val IN [1,2,3] /// when the OR conditions are present in the same FILTER node, and refer to the /// same (single) attribute.
//////////////////////////////////////////////////////////////////////////////// void triagens::aql::replaceOrWithInRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::FILTER, true)); bool modified = false; for (auto const& n : nodes) { TRI_ASSERT(n->hasDependency()); auto const dep = n->getFirstDependency(); if (dep->getType() != EN::CALCULATION) { continue; } auto fn = static_cast<FilterNode*>(n); auto inVar = fn->getVariablesUsedHere(); auto cn = static_cast<CalculationNode*>(dep); auto outVar = cn->getVariablesSetHere(); if (outVar.size() != 1 || outVar[0]->id != inVar[0]->id) { continue; } if (cn->expression()->node()->type != NODE_TYPE_OPERATOR_BINARY_OR) { continue; } OrToInConverter converter; if (converter.canConvertExpression(cn->expression()->node())) { ExecutionNode* newNode = nullptr; auto inNode = converter.buildInExpression(plan->getAst()); Expression* expr = new Expression(plan->getAst(), inNode); try { TRI_IF_FAILURE("OptimizerRules::replaceOrWithInRuleOom") { THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG); } newNode = new CalculationNode(plan, plan->nextId(), expr, outVar[0]); } catch (...) { delete expr; throw; } plan->registerNode(newNode); plan->replaceNode(cn, newNode); modified = true; } } opt->addPlan(plan, rule, modified); } struct RemoveRedundantOr { AstNode const* bestValue = nullptr; AstNodeType comparison; bool inclusive; bool isComparisonSet = false; CommonNodeFinder finder; AstNode const* commonNode = nullptr; std::string commonName; AstNode* createReplacementNode (Ast* ast) { TRI_ASSERT(commonNode != nullptr); TRI_ASSERT(bestValue != nullptr); TRI_ASSERT(isComparisonSet == true); return ast->createNodeBinaryOperator(comparison, commonNode->clone(ast), bestValue); } bool isInclusiveBound (AstNodeType type) { return (type == NODE_TYPE_OPERATOR_BINARY_GE || type == NODE_TYPE_OPERATOR_BINARY_LE); } int isCompatibleBound (AstNodeType type, AstNode const* value) { if ((comparison == NODE_TYPE_OPERATOR_BINARY_LE || comparison == NODE_TYPE_OPERATOR_BINARY_LT) && (type == NODE_TYPE_OPERATOR_BINARY_LE || type == NODE_TYPE_OPERATOR_BINARY_LT)) { return -1; //high bound } else if ((comparison == NODE_TYPE_OPERATOR_BINARY_GE || comparison == NODE_TYPE_OPERATOR_BINARY_GT) && (type == NODE_TYPE_OPERATOR_BINARY_GE || type == NODE_TYPE_OPERATOR_BINARY_GT)) { return 1; //low bound } return 0; //incompatible bounds } // returns false if the existing value is better and true if the input value is // better bool compareBounds (AstNodeType type, AstNode const* value, int lowhigh) { int cmp = CompareAstNodes(bestValue, value, true); if (cmp == 0 && (isInclusiveBound(comparison) != isInclusiveBound(type))) { return (isInclusiveBound(type) ? true : false); } return (cmp * lowhigh == 1); } bool hasRedundantCondition (AstNode const* node) { if (finder.find(node, NODE_TYPE_OPERATOR_BINARY_LT, commonNode, commonName)) { return hasRedundantConditionWalker(node); } return false; } bool hasRedundantConditionWalker (AstNode const* node) { AstNodeType type = node->type; if (type == NODE_TYPE_OPERATOR_BINARY_OR) { return (hasRedundantConditionWalker(node->getMember(0)) && hasRedundantConditionWalker(node->getMember(1))); } if (type == NODE_TYPE_OPERATOR_BINARY_LE || type == NODE_TYPE_OPERATOR_BINARY_LT || type == NODE_TYPE_OPERATOR_BINARY_GE || type == NODE_TYPE_OPERATOR_BINARY_GT) { auto lhs = node->getMember(0); auto rhs = node->getMember(1); if (hasRedundantConditionWalker(rhs) && ! 
hasRedundantConditionWalker(lhs) && lhs->isConstant()) { if (! isComparisonSet) { comparison = Ast::ReverseOperator(type); bestValue = lhs; isComparisonSet = true; return true; } int lowhigh = isCompatibleBound(Ast::ReverseOperator(type), lhs); if (lowhigh == 0) { return false; } if (compareBounds(type, lhs, lowhigh)) { comparison = Ast::ReverseOperator(type); bestValue = lhs; } return true; } if (hasRedundantConditionWalker(lhs) && ! hasRedundantConditionWalker(rhs) && rhs->isConstant()) { if (! isComparisonSet) { comparison = type; bestValue = rhs; isComparisonSet = true; return true; } int lowhigh = isCompatibleBound(type, rhs); if (lowhigh == 0) { return false; } if (compareBounds(type, rhs, lowhigh)) { comparison = type; bestValue = rhs; } return true; } // if hasRedundantConditionWalker(lhs) and // hasRedundantConditionWalker(rhs), then one of the conditions in the OR // statement is of the form x == x // fall-through intentional } else if (type == NODE_TYPE_REFERENCE || type == NODE_TYPE_ATTRIBUTE_ACCESS || type == NODE_TYPE_INDEXED_ACCESS) { // get a string representation of the node for comparisons return (node->toString() == commonName); } return false; } }; void triagens::aql::removeRedundantOrRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::FILTER, true)); bool modified = false; for (auto const& n : nodes) { TRI_ASSERT(n->hasDependency()); auto const dep = n->getFirstDependency(); if (dep->getType() != EN::CALCULATION) { continue; } auto fn = static_cast<FilterNode*>(n); auto inVar = fn->getVariablesUsedHere(); auto cn = static_cast<CalculationNode*>(dep); auto outVar = cn->getVariablesSetHere(); if (outVar.size() != 1 || outVar[0]->id != inVar[0]->id) { continue; } if (cn->expression()->node()->type != NODE_TYPE_OPERATOR_BINARY_OR) { continue; } RemoveRedundantOr remover; if (remover.hasRedundantCondition(cn->expression()->node())) { Expression* expr = nullptr; ExecutionNode* newNode = nullptr; auto astNode = remover.createReplacementNode(plan->getAst()); expr = new Expression(plan->getAst(), astNode); try { newNode = new CalculationNode(plan, plan->nextId(), expr, outVar[0]); } catch (...)
{ delete expr; throw; } plan->registerNode(newNode); plan->replaceNode(cn, newNode); modified = true; } } opt->addPlan(plan, rule, modified); } //////////////////////////////////////////////////////////////////////////////// /// @brief remove $OLD and $NEW variables from data-modification statements /// if not required //////////////////////////////////////////////////////////////////////////////// void triagens::aql::removeDataModificationOutVariablesRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { bool modified = false; std::vector<ExecutionNode::NodeType> const types = { EN::REMOVE, EN::INSERT, EN::UPDATE, EN::REPLACE, EN::UPSERT }; std::vector<ExecutionNode*> nodes(plan->findNodesOfType(types, true)); for (auto const& n : nodes) { auto node = static_cast<ModificationNode*>(n); TRI_ASSERT(node != nullptr); auto varsUsedLater = n->getVarsUsedLater(); if (varsUsedLater.find(node->getOutVariableOld()) == varsUsedLater.end()) { // "$OLD" is not used later node->clearOutVariableOld(); modified = true; } if (varsUsedLater.find(node->getOutVariableNew()) == varsUsedLater.end()) { // "$NEW" is not used later node->clearOutVariableNew(); modified = true; } } opt->addPlan(plan, rule, modified); } //////////////////////////////////////////////////////////////////////////////// /// @brief patch UPDATE statement on single collection that iterates over the /// entire collection to operate in batches //////////////////////////////////////////////////////////////////////////////// void triagens::aql::patchUpdateStatementsRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { bool modified = false; // no need to dive into subqueries here, as UPDATE needs to be on the top level std::vector<ExecutionNode*> nodes(plan->findNodesOfType(EN::UPDATE, false)); for (auto const& n : nodes) { // we should only get through here a single time auto node = static_cast<ModificationNode*>(n); TRI_ASSERT(node != nullptr); auto& options = node->getOptions(); if (!
options.readCompleteInput) { // already ok continue; } auto const collection = node->collection(); auto dep = n->getFirstDependency(); while (dep != nullptr) { auto const type = dep->getType(); if (type == EN::ENUMERATE_LIST || type == EN::INDEX || type == EN::SUBQUERY) { // not suitable modified = false; break; } if (type == EN::ENUMERATE_COLLECTION) { auto collectionNode = static_cast<EnumerateCollectionNode const*>(dep); if (collectionNode->collection() != collection) { // different collection, not suitable modified = false; break; } else { modified = true; } } if (type == EN::TRAVERSAL) { // unclear what will be read by the traversal modified = false; break; } dep = dep->getFirstDependency(); } if (modified) { options.readCompleteInput = false; } } // always re-add the original plan, be it modified or not // only a flag in the plan will be modified opt->addPlan(plan, rule, modified); } //////////////////////////////////////////////////////////////////////////////// /// @brief merges filter nodes into graph traversal nodes //////////////////////////////////////////////////////////////////////////////// void triagens::aql::mergeFilterIntoTraversalRule (Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) { std::vector<ExecutionNode*> tNodes(plan->findNodesOfType(EN::TRAVERSAL, true)); if (tNodes.empty()) { opt->addPlan(plan, rule, false); return; } // These are all the end nodes where we start std::vector<ExecutionNode*> nodes(plan->findEndNodes(true)); bool planAltered = false; for (auto const& n : nodes) { TraversalConditionFinder finder(plan, &planAltered); n->walk(&finder); } opt->addPlan(plan, rule, planAltered); } // Local Variables: // mode: outline-minor // outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|// --SECTION--\\|/// @\\}\\)" // End:<|fim▁end|>
auto expression = cn->expression(); if (expression != nullptr) { collectConstantAttributes(const_cast<AstNode*>(expression->node()));
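The NextPermutationTuple helper documented in the optimizer-rule source above is, in effect, an odometer over per-section std::next_permutation calls. The following stand-alone C++ sketch was written for this note to illustrate that contract; the main() driver and the section layout are assumptions for illustration, not code from the ArangoDB sources:

#include <algorithm>
#include <cstdio>
#include <vector>

// Advance a tuple of permutations to its lexicographic successor.
// Returns false once every section has wrapped back to sorted order.
static bool nextPermutationTuple(std::vector<size_t>& data,
                                 std::vector<size_t>& starts) {
  auto begin = data.begin();
  for (size_t i = starts.size(); i-- != 0; ) {
    auto from = begin + starts[i];
    auto to = (i == starts.size() - 1) ? data.end() : begin + starts[i + 1];
    if (std::next_permutation(from, to)) {
      return true;  // this section advanced; earlier sections are untouched
    }
    // this section wrapped around to sorted order: carry into section i-1
  }
  return false;
}

int main() {
  // Two sections, {0,1,2} and {3,4}: expect 3! * 2! = 12 distinct tuples.
  std::vector<size_t> data = {0, 1, 2, 3, 4};
  std::vector<size_t> starts = {0, 3};
  int count = 1;  // the initial (sorted) tuple counts as well
  while (nextPermutationTuple(data, starts)) {
    ++count;
  }
  std::printf("%d\n", count);  // prints 12
  return 0;
}

This is exactly the loop interchangeAdjacentEnumerationsRule drives: one cloned plan per tuple, with the collected EnumerateCollectionNodes reinserted in the order the current permutation dictates.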
<|file_name|>fill-triangles.go<|end_file_name|><|fim▁begin|>// http://www.sunshine2k.de/coding/java/TriangleRasterization/TriangleRasterization.html // http://blackpawn.com/texts/pointinpoly/ // https://fgiesen.wordpress.com/2013/02/10/optimizing-the-basic-rasterizer/ package main import ( "fmt" "image" "image/color" "image/draw" "log" "os" "runtime" "github.com/qeedquan/go-media/sdl" "github.com/qeedquan/go-media/sdl/sdlgfx" ) var ( window *sdl.Window renderer *sdl.Renderer texture *sdl.Texture canvas *image.RGBA mode int triangle = [3]image.Point{ {150, 641}, {356, 67}, {56, 245}, } ) func main() { runtime.LockOSThread() initSDL() for { event() blit() } } func ck(err error) { if err != nil { log.Fatal(err) } } func initSDL() { err := sdl.Init(sdl.INIT_VIDEO | sdl.INIT_TIMER) ck(err) sdl.SetHint(sdl.HINT_RENDER_SCALE_QUALITY, "best") w, h := 1280, 800 wflag := sdl.WINDOW_RESIZABLE window, renderer, err = sdl.CreateWindowAndRenderer(w, h, wflag) ck(err) window.SetTitle("Fill Triangles") resizeWindow(w, h) } func resizeWindow(w, h int) { var err error texture, err = renderer.CreateTexture(sdl.PIXELFORMAT_ABGR8888, sdl.TEXTUREACCESS_STREAMING, w, h) ck(err) canvas = image.NewRGBA(image.Rect(0, 0, w, h)) } func event() { modes := []string{ "Scanline", "Bresenham", "Barycentric1", "Barycentric2", } for { ev := sdl.PollEvent() if ev == nil { break } switch ev := ev.(type) { case sdl.QuitEvent: os.Exit(0) case sdl.KeyDownEvent: switch ev.Sym { case sdl.K_ESCAPE: os.Exit(0) case sdl.K_1: mode = 0 case sdl.K_2: mode = 1 case sdl.K_3: mode = 2 case sdl.K_4: mode = 3 } fmt.Println("Drawing Mode: ", modes[mode]) case sdl.MouseButtonDownEvent: if 0 <= ev.Button-1 && int(ev.Button-1) < len(triangle) { triangle[ev.Button-1] = image.Pt(int(ev.X), int(ev.Y)) } case sdl.WindowEvent: switch ev.Event { case sdl.WINDOWEVENT_RESIZED: w, h := int(ev.Data[0]), int(ev.Data[1]) resizeWindow(w, h) } } } } func blit() { renderer.Clear() draw.Draw(canvas, canvas.Bounds(), image.NewUniform(color.RGBA{100, 100, 120, 255}), image.ZP, draw.Over) switch mode { case 0: blitScanline(triangle[0], triangle[1], triangle[2], color.RGBA{255, 255, 255, 255}) case 1: blitBresenham(triangle[0], triangle[1], triangle[2], color.RGBA{255, 255, 255, 255}) case 2: blitBarycentric1(triangle[0], triangle[1], triangle[2], color.RGBA{255, 255, 255, 255}) case 3: blitBarycentric2(triangle[0], triangle[1], triangle[2], color.RGBA{255, 255, 255, 255}) } texture.Update(nil, canvas.Pix, canvas.Stride) renderer.Copy(texture, nil, nil) pal := []color.RGBA{ color.RGBA{255, 0, 0, 255}, color.RGBA{0, 255, 0, 255}, color.RGBA{0, 0, 255, 255}, } for i, p := range triangle { sdlgfx.FilledCircle(renderer, p.X, p.Y, 10, pal[i]) } renderer.Present() } // scanline algorithm: // sort the triangle vertices in order of increasing y // this makes it so that as we loop through we are moving down // (more importantly, all edges are moving in the same direction if we do this) // the next step is to split the triangle into two parts, top half and bottom half // the top half and bottom half of the triangle will have a constant slope // such that as dy increase by 1, dx/dy is the same for that portion // so we can just increase the x coordinate by dx/dy and draw a horizontal line // for them. 
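// Added worked example (the coordinates are illustrative only, not taken from
// the code below): for A=(10,10), B=(50,40), C=(90,100) the split point D lies
// on edge AC at B's scanline, so Dx = Ax + ((By-Ay)/(Cy-Ay))*(Cx-Ax)
// = 10 + (30.0/90.0)*80 = 36.67, truncated to D=(36,40); blitScanline then
// fills the flat-bottom half A,B,D and the flat-top half B,D,C separately.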
func blitScanline(a, b, c image.Point, col color.RGBA) { a, b, c = sortPointsByY(a, b, c) d := image.Pt( int(float64(a.X)+(float64(b.Y-a.Y)/float64(c.Y-a.Y))*float64(c.X-a.X)), b.Y, ) blitScanlineBottom(a, b, d, col) blitScanlineTop(b, d, c, color.RGBA{255 - col.R, 255 - col.G, 255 - col.B, 255}) } func blitScanlineBottom(a, b, c image.Point, col color.RGBA) { invslope1 := float64(b.X-a.X) / float64(b.Y-a.Y) invslope2 := float64(c.X-a.X) / float64(c.Y-a.Y) curx1 := float64(a.X) curx2 := float64(a.X) for scanlineY := a.Y; scanlineY <= b.Y; scanlineY++ { blitHline(int(curx1), int(curx2), scanlineY, col) curx1 += invslope1 curx2 += invslope2<|fim▁hole|> } } func blitScanlineTop(a, b, c image.Point, col color.RGBA) { invslope1 := float64(c.X-a.X) / float64(c.Y-a.Y) invslope2 := float64(c.X-b.X) / float64(c.Y-b.Y) curx1 := float64(c.X) curx2 := float64(c.X) for scanlineY := c.Y; scanlineY > a.Y; scanlineY-- { blitHline(int(curx1), int(curx2), scanlineY, col) curx1 -= invslope1 curx2 -= invslope2 } } func blitHline(x0, x1, y int, col color.RGBA) { if x1 < x0 { x0, x1 = x1, x0 } for i := x0; i <= x1; i++ { canvas.Set(i, y, col) } } // bresenham algorithm works the same as scanline where // you split the triangle into top and bottom, but it is fancier because // it uses bresenham line drawing to figure out the x coordinate boundary // for each scanline rather than increasing by constant slope, but it // gives the same answer func blitBresenham(a, b, c image.Point, col color.RGBA) { a, b, c = sortPointsByY(a, b, c) a, b = blitBresenhamFill(a, b, a, c, col) blitBresenhamFill(a, c, b, c, color.RGBA{255 - col.R, 255 - col.G, 255 - col.B, 255}) } func blitBresenhamFill(a, b, c, d image.Point, col color.RGBA) (p, q image.Point) { x0, y0 := a.X, a.Y x1, y1 := b.X, b.Y dx := abs(x1 - x0) dy := -abs(y1 - y0) sx, sy := -1, -1 if x0 < x1 { sx = 1 } if y0 < y1 { sy = 1 } e := dx + dy x2, y2 := c.X, c.Y x3, y3 := d.X, d.Y ndx := abs(x3 - x2) ndy := -abs(y3 - y2) nsx, nsy := -1, -1 if x2 < x3 { nsx = 1 } if y2 < y3 { nsy = 1 } ne := ndx + ndy loop: for { blitHline(x0, x2, y0, col) for { if x0 == x1 && y0 == y1 { break loop } e2 := 2 * e if e2 >= dy { e += dy x0 += sx } if e2 <= dx { e += dx y0 += sy break } } for { if x2 == x3 && y2 == y3 { break loop } e2 := 2 * ne if e2 >= ndy { ne += ndy x2 += nsx } if e2 <= ndx { ne += ndx y2 += nsy break } } } p = image.Pt(x0, y0) q = image.Pt(x2, y2) return } // barycentric algorithm is where you find the bounding box // of the triangle, loop through the box and do a point // in triangle test, if it is inside, color it // slowest algorithm but very elegant func blitBarycentric1(a, b, c image.Point, col color.RGBA) { x0 := min(a.X, min(b.X, c.X)) x1 := max(a.X, max(b.X, c.X)) y0 := min(a.Y, min(b.Y, c.Y)) y1 := max(a.Y, max(b.Y, c.Y)) for y := y0; y <= y1; y++ { for x := x0; x <= x1; x++ { p := image.Pt(x, y) if insideTriangle(p, a, b, c) { canvas.Set(x, y, col) } } } } // optimized version of barycentric blitting, where instead // of testing per pixel whether or not it is in a triangle // we can walk directly in barycentric space (w0, w1, w2) // and figure out using those coefficients whether or not // we are inside the triangle or not func blitBarycentric2(a, b, c image.Point, col color.RGBA) { x0 := min(a.X, min(b.X, c.X)) x1 := max(a.X, max(b.X, c.X)) y0 := min(a.Y, min(b.Y, c.Y)) y1 := max(a.Y, max(b.Y, c.Y)) A01 := a.Y - b.Y B01 := b.X - a.X A12 := b.Y - c.Y B12 := c.X - b.X A20 := c.Y - a.Y B20 := a.X - c.X p := image.Pt(x0, y0) w0r := sign(b, c, p) w1r := sign(c, 
a, p) w2r := sign(a, b, p) for y := y0; y <= y1; y++ { w0, w1, w2 := w0r, w1r, w2r for x := x0; x <= x1; x++ { s1, s2, s3 := w0 < 0, w1 < 0, w2 < 0 if s1 == s2 && s2 == s3 { canvas.Set(x, y, col) } w0 += A12 w1 += A20 w2 += A01 } w0r += B12 w1r += B20 w2r += B01 } } func abs(x int) int { if x < 0 { return -x } return x } func min(a, b int) int { if a < b { return a } return b } func max(a, b int) int { if a > b { return a } return b } func sign(a, b, c image.Point) int { return (a.X-c.X)*(b.Y-c.Y) - (b.X-c.X)*(a.Y-c.Y) } func insideTriangle(p, a, b, c image.Point) bool { b1 := sign(p, a, b) < 0 b2 := sign(p, b, c) < 0 b3 := sign(p, c, a) < 0 return b1 == b2 && b2 == b3 } func sortPointsByY(a, b, c image.Point) (p, q, r image.Point) { if a.Y > b.Y { a, b = b, a } if a.Y > c.Y { a, c = c, a } if b.Y > c.Y { b, c = c, b } return a, b, c }<|fim▁end|>
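One detail worth making concrete from the blitBarycentric2 comments above: the edge function sign() is affine in p, so stepping one pixel right changes its value by a constant (the A01/A12/A20 terms) and stepping one row down by another constant (the B terms). The following self-contained Go check was written for this note — the coordinates are arbitrary illustrations, not values from the program above:

package main

import "fmt"

type pt struct{ x, y int }

// edge is the same edge function as sign() in fill-triangles.go.
func edge(a, b, p pt) int {
	return (a.x-p.x)*(b.y-p.y) - (b.x-p.x)*(a.y-p.y)
}

func main() {
	a, b, p := pt{3, 7}, pt{11, 2}, pt{5, 5}
	step := edge(a, b, pt{p.x + 1, p.y}) - edge(a, b, p)
	fmt.Println(step == a.y-b.y) // true: one addition per pixel replaces a full point-in-triangle test
}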
<|file_name|>test_printers.py<|end_file_name|><|fim▁begin|>"""Test the print-to-python-file module This just uses the simpleparsegrammar declaration, which is parsed, then linearised, then loaded as a Python module. """ import os, unittest import test_grammarparser testModuleFile = 'test_printers_garbage.py' class PrintersTests(test_grammarparser.SimpleParseGrammarTests): def setUp( self ): from simpleparse import simpleparsegrammar, parser, printers, baseparser p = parser.Parser( simpleparsegrammar.declaration, 'declarationset') open(testModuleFile,'w').write(printers.asGenerator( p._generator )) import test_printers_garbage reload( test_printers_garbage ) class RParser( test_printers_garbage.Parser, baseparser.BaseParser ): pass self.recursiveParser = RParser() def tearDown( self ): try: os.remove( testModuleFile ) except IOError, err: pass def doBasicTest(self, parserName, testValue, expected, ): result = self.recursiveParser.parse( testValue, production=parserName ) assert result == expected, '''\nexpected:%s\n got:%s\n'''%( expected, result ) def getSuite():<|fim▁hole|> if __name__ == "__main__": unittest.main(defaultTest="getSuite")<|fim▁end|>
return unittest.makeSuite(PrintersTests,'test')
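The test module above is Python 2 throughout (bare reload(), "except IOError, err", unittest.makeSuite). As a hedged porting note — assuming the same module and class names as in the row — the usual Python 3 spellings would be:

import importlib
import unittest

import test_printers_garbage  # the module the test generates and imports

importlib.reload(test_printers_garbage)  # Python 3 spelling of the bare reload()
# unittest.makeSuite is deprecated (removed in Python 3.13); use the loader instead:
suite = unittest.TestLoader().loadTestsFromTestCase(PrintersTests)  # PrintersTests as defined above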
<|file_name|>button.cpp<|end_file_name|><|fim▁begin|>#include "../widgetslib.h" #include "button.h" #include "Meta/math/vector4.h" #include "vertex.h" #include "Meta/keyvalue/metatable_impl.h" #include "Meta/serialize/serializetable_impl.h" #include "fontloader.h" #include "imageloader.h" METATABLE_BEGIN_BASE(Engine::Widgets::Button, Engine::Widgets::WidgetBase) MEMBER(mText) MEMBER(mFontSize) MEMBER(mPivot) PROPERTY(Font, font, setFont) PROPERTY(Image, image, setImage) METATABLE_END(Engine::Widgets::Button) SERIALIZETABLE_INHERIT_BEGIN(Engine::Widgets::Button, Engine::Widgets::WidgetBase) FIELD(mText) FIELD(mFontSize) FIELD(mPivot) ENCAPSULATED_FIELD(mFont, fontName, setFontName) ENCAPSULATED_FIELD(Image, imageName, setImageName) SERIALIZETABLE_END(Engine::Widgets::Button) namespace Engine { namespace Widgets { void Button::setImageName(std::string_view name) { setImage(Resources::ImageLoader::getSingleton().get(name)); } void Button::setImage(Resources::ImageLoader::ResourceType *image) { mImage = image; } std::string_view Button::imageName() const { return mImage ? mImage->name() : ""; } Resources::ImageLoader::ResourceType *Button::image() const { return mImage; } Resources::ImageLoader::ResourceType *Button::resource() const { return mImage; } Threading::SignalStub<> &Button::clickEvent() { return mClicked; } std::vector<std::pair<std::vector<Vertex>, TextureSettings>> Button::vertices(const Vector3 &screenSize) { std::vector<std::pair<std::vector<Vertex>, TextureSettings>> returnSet; std::vector<Vertex> result; Vector3 pos = (getEffectivePosition() * screenSize) / screenSize; Vector3 size = (getEffectiveSize() * screenSize) / screenSize; pos.z = depth(); Vector4 color = mHovered ? Vector4 { 1.0f, 0.1f, 0.1f, 1.0f } : Vector4 { 0.4f, 0.4f, 0.4f, 1.0f }; Vector3 v = pos; result.push_back({ v, color, { 0.0f, 0.0f } }); v.x += size.x; result.push_back({ v, color, { 1.0f, 0.0f } }); v.y += size.y; result.push_back({ v, color, { 1.0f, 1.0f } }); result.push_back({ v, color, { 1.0f, 1.0f } }); v.x -= size.x; result.push_back({ v, color, { 0.0f, 1.0f } }); v.y -= size.y; result.push_back({ v, color, { 0.0f, 0.0f } }); returnSet.push_back({ result, {} }); if (mFont.available() /*&& mFont->mTexture.available()*/) { //mFont->setPersistent(true); std::pair<std::vector<Vertex>, TextureSettings> fontVertices = renderText(mText, pos, size.xy(), mFont, size.z * mFontSize, mPivot, screenSize); if (!fontVertices.first.empty()) returnSet.push_back(fontVertices); } return returnSet; } bool Button::injectPointerEnter(const Input::PointerEventArgs &arg) { mHovered = true; return true; } bool Button::injectPointerLeave(const Input::PointerEventArgs &arg) { mHovered = false; mClicking = false; return true; } bool Button::injectPointerPress(const Input::PointerEventArgs &arg) { mClicking = true; return true; } bool Button::injectPointerRelease(const Input::PointerEventArgs &arg) { if (mClicking) emitClicked(); return true; } <|fim▁hole|> { mClicked.emit(); } WidgetClass Button::getClass() const { return WidgetClass::BUTTON; } std::string_view Button::fontName() const { return mFont.name(); } void Button::setFontName(std::string_view name) { mFont.load(name); } Render::FontLoader::ResourceType *Button::font() const { return mFont.resource(); } void Button::setFont(Render::FontLoader::HandleType font) { mFont = std::move(font); } } }<|fim▁end|>
void Button::emitClicked()
<|file_name|>plIgnoreComponent.cpp<|end_file_name|><|fim▁begin|>/*==LICENSE==* CyanWorlds.com Engine - MMOG client, server and tools Copyright (C) 2011 Cyan Worlds, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. Additional permissions under GNU GPL version 3 section 7 If you modify this Program, or any covered work, by linking or combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK, NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK (or a modified version of those libraries), containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA, PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the licensors of this Program grant you additional permission to convey the resulting work. Corresponding Source for a non-source form of such a combination shall include the source code for the parts of OpenSSL and IJG JPEG Library used as well as that of the covered work. You can contact Cyan Worlds, Inc. by email [email protected] or by snail mail at: Cyan Worlds, Inc. 14617 N Newport Hwy Mead, WA 99021 *==LICENSE==*/ #include "HeadSpin.h" #include "plComponent.h" #include "plComponentReg.h" #include "plMiscComponents.h" #include "MaxMain/plMaxNode.h" #include "resource.h" #include <iparamm2.h> #pragma hdrstop #include "MaxMain/plPlasmaRefMsgs.h" #include "pnSceneObject/plSceneObject.h" #include "pnSceneObject/plCoordinateInterface.h" #include "pnSceneObject/plDrawInterface.h" #include "plMessage/plSimStateMsg.h" #include "pnMessage/plEnableMsg.h" #include "MaxMain/plPluginResManager.h" void DummyCodeIncludeFuncIgnore() {} ///////////////////////////////////////////////////////////////////////////////////////////////// // // Ignore Component // // //Class that accesses the paramblock below. class plIgnoreComponent : public plComponent { public: plIgnoreComponent(); // SetupProperties - Internal setup and write-only set properties on the MaxNode. No reading // of properties on the MaxNode, as it's still indeterminant. bool SetupProperties(plMaxNode *pNode, plErrorMsg *pErrMsg); bool Convert(plMaxNode *node, plErrorMsg *pErrMsg); virtual void CollectNonDrawables(INodeTab& nonDrawables); }; //Max desc stuff necessary below. 
CLASS_DESC(plIgnoreComponent, gIgnoreDesc, "Ignore", "Ignore", COMP_TYPE_IGNORE, Class_ID(0x48326288, 0x528a3dea)) enum { kIgnoreMeCheckBx }; ParamBlockDesc2 gIgnoreBk ( plComponent::kBlkComp, _T("Ignore"), 0, &gIgnoreDesc, P_AUTO_CONSTRUCT + P_AUTO_UI, plComponent::kRefComp, IDD_COMP_IGNORE, IDS_COMP_IGNORES, 0, 0, NULL, kIgnoreMeCheckBx, _T("Ignore"), TYPE_BOOL, 0, 0, p_default, TRUE, p_ui, TYPE_SINGLECHEKBOX, IDC_COMP_IGNORE_CKBX, end, end ); plIgnoreComponent::plIgnoreComponent() { fClassDesc = &gIgnoreDesc; fClassDesc->MakeAutoParamBlocks(this); } void plIgnoreComponent::CollectNonDrawables(INodeTab& nonDrawables) { if (fCompPB->GetInt(kIgnoreMeCheckBx)) { AddTargetsToList(nonDrawables); } } // SetupProperties - Internal setup and write-only set properties on the MaxNode. No reading // of properties on the MaxNode, as it's still indeterminant. bool plIgnoreComponent::SetupProperties(plMaxNode *pNode, plErrorMsg *pErrMsg) { if (fCompPB->GetInt(kIgnoreMeCheckBx)) pNode->SetCanConvert(false); return true; } bool plIgnoreComponent::Convert(plMaxNode *node, plErrorMsg *pErrMsg) { return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// // // IgnoreLite Component // // //Class that accesses the paramblock below. class plIgnoreLiteComponent : public plComponent { public: enum { kSelectedOnly }; enum LightState { kTurnOn, kTurnOff, kToggle }; public: plIgnoreLiteComponent(); void SetState(LightState s); // SetupProperties - Internal setup and write-only set properties on the MaxNode. No reading // of properties on the MaxNode, as it's still indeterminant. bool SetupProperties(plMaxNode *pNode, plErrorMsg *pErrMsg) { return true; } bool PreConvert(plMaxNode *pNode, plErrorMsg *pErrMsg) { return true; } bool Convert(plMaxNode *node, plErrorMsg *pErrMsg) { return true; } }; class plIgnoreLiteProc : public ParamMap2UserDlgProc { public: BOOL DlgProc(TimeValue t, IParamMap2 *map, HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam) { switch (msg) { case WM_COMMAND: if( (HIWORD(wParam) == BN_CLICKED) && (LOWORD(wParam) == IDC_COMP_IGNORELITE_ON) ) { plIgnoreLiteComponent* ilc = (plIgnoreLiteComponent*)map->GetParamBlock()->GetOwner(); ilc->SetState(plIgnoreLiteComponent::kTurnOn); return TRUE; } if( (HIWORD(wParam) == BN_CLICKED) && (LOWORD(wParam) == IDC_COMP_IGNORELITE_OFF) ) { plIgnoreLiteComponent* ilc = (plIgnoreLiteComponent*)map->GetParamBlock()->GetOwner(); ilc->SetState(plIgnoreLiteComponent::kTurnOff); return TRUE; } if( (HIWORD(wParam) == BN_CLICKED) && (LOWORD(wParam) == IDC_COMP_IGNORELITE_TOGGLE) ) { plIgnoreLiteComponent* ilc = (plIgnoreLiteComponent*)map->GetParamBlock()->GetOwner(); ilc->SetState(plIgnoreLiteComponent::kToggle); return TRUE; } break; } return false; } void DeleteThis() {} }; static plIgnoreLiteProc gIgnoreLiteProc; //Max desc stuff necessary below. 
CLASS_DESC(plIgnoreLiteComponent, gIgnoreLiteDesc, "Control Max Light", "ControlLite", COMP_TYPE_IGNORE, IGNORELITE_CID) ParamBlockDesc2 gIgnoreLiteBk ( plComponent::kBlkComp, _T("IgnoreLite"), 0, &gIgnoreLiteDesc, P_AUTO_CONSTRUCT + P_AUTO_UI, plComponent::kRefComp, IDD_COMP_IGNORELITE, IDS_COMP_IGNORELITES, 0, 0, &gIgnoreLiteProc, plIgnoreLiteComponent::kSelectedOnly, _T("SelectedOnly"), TYPE_BOOL, 0, 0, p_default, FALSE, p_ui, TYPE_SINGLECHEKBOX, IDC_COMP_IGNORELITE_SELECTED, end, end ); plIgnoreLiteComponent::plIgnoreLiteComponent() { fClassDesc = &gIgnoreLiteDesc; fClassDesc->MakeAutoParamBlocks(this); } void plIgnoreLiteComponent::SetState(LightState s) { BOOL selectedOnly = fCompPB->GetInt(kSelectedOnly); int numTarg = NumTargets(); int i; for( i = 0; i < numTarg; i++ ) { plMaxNodeBase* targ = GetTarget(i); if( targ ) { if( selectedOnly && !targ->Selected() ) continue; Object *obj = targ->EvalWorldState(TimeValue(0)).obj; if (obj && (obj->SuperClassID() == SClass_ID(LIGHT_CLASS_ID))) { LightObject* liObj = (LightObject*)obj; switch( s ) { case kTurnOn: liObj->SetUseLight(true); break; case kTurnOff: liObj->SetUseLight(false); break; case kToggle: liObj->SetUseLight(!liObj->GetUseLight()); break; } } } } } ///////////////////////////////////////////////////////////////////////////////////////////////// // // Barney Component // // //Class that accesses the paramblock below. class plBarneyComponent : public plComponent { public: plBarneyComponent(); // SetupProperties - Internal setup and write-only set properties on the MaxNode. No reading // of properties on the MaxNode, as it's still indeterminant. bool SetupProperties(plMaxNode *pNode, plErrorMsg *pErrMsg); bool Convert(plMaxNode *node, plErrorMsg *pErrMsg); }; //Max desc stuff necessary below. CLASS_DESC(plBarneyComponent, gBarneyDesc, "Barney", "Barney", COMP_TYPE_IGNORE, Class_ID(0x376955dc, 0x2fec50ae)) ParamBlockDesc2 gBarneyBk ( plComponent::kBlkComp, _T("Barney"), 0, &gBarneyDesc, P_AUTO_CONSTRUCT + P_AUTO_UI, plComponent::kRefComp, IDD_COMP_BARNEY, IDS_COMP_BARNEYS, 0, 0, NULL, end ); plBarneyComponent::plBarneyComponent() { fClassDesc = &gBarneyDesc; fClassDesc->MakeAutoParamBlocks(this); } // SetupProperties - Internal setup and write-only set properties on the MaxNode. No reading // of properties on the MaxNode, as it's still indeterminant. bool plBarneyComponent::SetupProperties(plMaxNode *pNode, plErrorMsg *pErrMsg) { pNode->SetCanConvert(false); pNode->SetIsBarney(true); return true; } bool plBarneyComponent::Convert(plMaxNode *node, plErrorMsg *pErrMsg) { return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// // // NoShow Component // // //Class that accesses the paramblock below. class plNoShowComponent : public plComponent { public:<|fim▁hole|> enum { kShowable, kAffectDraw, kAffectPhys }; public: plNoShowComponent(); virtual void CollectNonDrawables(INodeTab& nonDrawables); // SetupProperties - Internal setup and write-only set properties on the MaxNode. No reading // of properties on the MaxNode, as it's still indeterminant. bool SetupProperties(plMaxNode *pNode, plErrorMsg *pErrMsg); bool Convert(plMaxNode *node, plErrorMsg *pErrMsg); }; const Class_ID COMP_NOSHOW_CID(0x41cb2b85, 0x615932c6); //Max desc stuff necessary below. 
CLASS_DESC(plNoShowComponent, gNoShowDesc, "NoShow", "NoShow", COMP_TYPE_IGNORE, COMP_NOSHOW_CID) ParamBlockDesc2 gNoShowBk ( plComponent::kBlkComp, _T("NoShow"), 0, &gNoShowDesc, P_AUTO_CONSTRUCT + P_AUTO_UI, plComponent::kRefComp, IDD_COMP_NOSHOW, IDS_COMP_NOSHOW, 0, 0, NULL, plNoShowComponent::kShowable, _T("Showable"), TYPE_BOOL, 0, 0, p_default, FALSE, p_ui, TYPE_SINGLECHEKBOX, IDC_COMP_NOSHOW_SHOWABLE, end, plNoShowComponent::kAffectDraw, _T("AffectDraw"), TYPE_BOOL, 0, 0, p_default, TRUE, p_ui, TYPE_SINGLECHEKBOX, IDC_COMP_NOSHOW_AFFECTDRAW, end, plNoShowComponent::kAffectPhys, _T("AffectPhys"), TYPE_BOOL, 0, 0, p_default, FALSE, p_ui, TYPE_SINGLECHEKBOX, IDC_COMP_NOSHOW_AFFECTPHYS, end, end ); plNoShowComponent::plNoShowComponent() { fClassDesc = &gNoShowDesc; fClassDesc->MakeAutoParamBlocks(this); } // SetupProperties - Internal setup and write-only set properties on the MaxNode. No reading // of properties on the MaxNode, as it's still indeterminant. bool plNoShowComponent::SetupProperties(plMaxNode *pNode, plErrorMsg *pErrMsg) { if( !fCompPB->GetInt(kShowable) ) { if( fCompPB->GetInt(kAffectDraw) ) pNode->SetDrawable(false); if( fCompPB->GetInt(kAffectPhys) ) pNode->SetPhysical(false); } return true; } bool plNoShowComponent::Convert(plMaxNode *node, plErrorMsg *pErrMsg) { plSceneObject* obj = node->GetSceneObject(); if( !obj ) return true; if( fCompPB->GetInt(kShowable) ) { if( fCompPB->GetInt(kAffectDraw) ) { plEnableMsg* eMsg = new plEnableMsg(nil, plEnableMsg::kDisable, plEnableMsg::kDrawable); eMsg->AddReceiver(obj->GetKey()); eMsg->Send(); } if( fCompPB->GetInt(kAffectPhys) ) { hsAssert(0, "Who uses this?"); // plEventGroupEnableMsg* pMsg = new plEventGroupEnableMsg; // pMsg->SetFlags(plEventGroupEnableMsg::kCollideOff | plEventGroupEnableMsg::kReportOff); // pMsg->AddReceiver(obj->GetKey()); // pMsg->Send(); } #if 0 plDrawInterface* di = node->GetDrawInterface(); if( di ) { di->SetProperty(plDrawInterface::kDisable, true); } #endif } return true; } void plNoShowComponent::CollectNonDrawables(INodeTab& nonDrawables) { if( fCompPB->GetInt(kAffectDraw) ) AddTargetsToList(nonDrawables); }<|fim▁end|>
<|file_name|>xgboost_custom.cc<|end_file_name|><|fim▁begin|>// Copyright (c) 2015 by Contributors // This file contains the customization implementations of R module // to change behavior of libxgboost #include <xgboost/logging.h> #include "src/common/random.h" #include "./xgboost_R.h" // redirect the messages to R's console. namespace dmlc { void CustomLogMessage::Log(const std::string& msg) { Rprintf("%s\n", msg.c_str()); } } // namespace dmlc // implements rabit error handling. extern "C" { void XGBoostAssert_R(int exp, const char *fmt, ...); void XGBoostCheck_R(int exp, const char *fmt, ...); } namespace rabit { namespace utils { extern "C" { void (*Printf)(const char *fmt, ...) = Rprintf; void (*Assert)(int exp, const char *fmt, ...) = XGBoostAssert_R; void (*Check)(int exp, const char *fmt, ...) = XGBoostCheck_R; void (*Error)(const char *fmt, ...) = error; } } } namespace xgboost { ConsoleLogger::~ConsoleLogger() { dmlc::CustomLogMessage::Log(log_stream_.str()); } TrackerLogger::~TrackerLogger() { dmlc::CustomLogMessage::Log(log_stream_.str()); } } // namespace xgboost namespace xgboost { namespace common { // redirect the math functions. bool CheckNAN(double v) { return ISNAN(v); } double LogGamma(double v) { return lgammafn(v); } // customize random engine. void CustomGlobalRandomEngine::seed(CustomGlobalRandomEngine::result_type val) { // ignore the seed<|fim▁hole|> // use R's PRNG to replace CustomGlobalRandomEngine::result_type CustomGlobalRandomEngine::operator()() { return static_cast<result_type>( std::floor(unif_rand() * CustomGlobalRandomEngine::max())); } } // namespace common } // namespace xgboost<|fim▁end|>
}
<|file_name|>playground.d.ts<|end_file_name|><|fim▁begin|>/* tslint:disable */ /* eslint-disable */ /**<|fim▁hole|>* @param {string} context * @returns {string} */ export function render(template: string, context: string): string;<|fim▁end|>
* @param {string} template
<|file_name|>hr_payroll_account.py<|end_file_name|><|fim▁begin|>#-*- coding:utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import time from datetime import date, datetime, timedelta from openerp.osv import fields, osv from openerp.tools import float_compare, float_is_zero from openerp.tools.translate import _ from openerp.exceptions import UserError class hr_payslip_line(osv.osv): ''' Payslip Line ''' _inherit = 'hr.payslip.line' def _get_partner_id(self, cr, uid, payslip_line, credit_account, context=None): """ Get partner_id of slip line to use in account_move_line """ # use partner of salary rule or fallback on employee's address partner_id = payslip_line.salary_rule_id.register_id.partner_id.id or \ payslip_line.slip_id.employee_id.address_home_id.id if credit_account: if payslip_line.salary_rule_id.register_id.partner_id or \ payslip_line.salary_rule_id.account_credit.internal_type in ('receivable', 'payable'): return partner_id else: if payslip_line.salary_rule_id.register_id.partner_id or \ payslip_line.salary_rule_id.account_debit.internal_type in ('receivable', 'payable'): return partner_id return False class hr_payslip(osv.osv): ''' Pay Slip ''' _inherit = 'hr.payslip' _description = 'Pay Slip' _columns = { 'date': fields.date('Date Account', states={'draft': [('readonly', False)]}, readonly=True, help="Keep empty to use the period of the validation(Payslip) date."), 'journal_id': fields.many2one('account.journal', 'Salary Journal',states={'draft': [('readonly', False)]}, readonly=True, required=True), 'move_id': fields.many2one('account.move', 'Accounting Entry', readonly=True, copy=False), } def _get_default_journal(self, cr, uid, context=None): journal_obj = self.pool.get('account.journal') res = journal_obj.search(cr, uid, [('type', '=', 'general')]) if res: return res[0] return False _defaults = { 'journal_id': _get_default_journal, } def create(self, cr, uid, vals, context=None): if context is None: context = {} if 'journal_id' in context: vals.update({'journal_id': context.get('journal_id')}) return super(hr_payslip, self).create(cr, uid, vals, context=context) def onchange_contract_id(self, cr, uid, ids, date_from, date_to, employee_id=False, contract_id=False, context=None): contract_obj = self.pool.get('hr.contract') res = super(hr_payslip, self).onchange_contract_id(cr, uid, ids, date_from=date_from, date_to=date_to, employee_id=employee_id, contract_id=contract_id, context=context) journal_id = contract_id and contract_obj.browse(cr, uid, contract_id, context=context).journal_id.id or False res['value'].update({'journal_id': journal_id}) return res def cancel_sheet(self, cr, uid, ids, context=None): move_pool = self.pool.get('account.move') move_ids = [] move_to_cancel = [] for slip in self.browse(cr, uid, ids, context=context): if slip.move_id: move_ids.append(slip.move_id.id) if slip.move_id.state == 'posted': move_to_cancel.append(slip.move_id.id) move_pool.button_cancel(cr, uid, move_to_cancel, context=context) move_pool.unlink(cr, uid, move_ids, context=context) return super(hr_payslip, self).cancel_sheet(cr, uid, ids, context=context) def process_sheet(self, cr, uid, ids, context=None): move_pool = self.pool.get('account.move') hr_payslip_line_pool = self.pool['hr.payslip.line'] precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Payroll') timenow = time.strftime('%Y-%m-%d') for slip in self.browse(cr, uid, ids, context=context): line_ids = [] debit_sum = 0.0 credit_sum = 0.0 date = timenow name = 
_('Payslip of %s') % (slip.employee_id.name) move = { 'narration': name, 'ref': slip.number, 'journal_id': slip.journal_id.id, 'date': date, } for line in slip.details_by_salary_rule_category: amt = slip.credit_note and -line.total or line.total if float_is_zero(amt, precision_digits=precision): continue debit_account_id = line.salary_rule_id.account_debit.id credit_account_id = line.salary_rule_id.account_credit.id if debit_account_id: debit_line = (0, 0, { 'name': line.name, 'partner_id': hr_payslip_line_pool._get_partner_id(cr, uid, line, credit_account=False, context=context), 'account_id': debit_account_id, 'journal_id': slip.journal_id.id, 'date': date, 'debit': amt > 0.0 and amt or 0.0, 'credit': amt < 0.0 and -amt or 0.0, 'analytic_account_id': line.salary_rule_id.analytic_account_id and line.salary_rule_id.analytic_account_id.id or False, 'tax_line_id': line.salary_rule_id.account_tax_id and line.salary_rule_id.account_tax_id.id or False, }) line_ids.append(debit_line) debit_sum += debit_line[2]['debit'] - debit_line[2]['credit'] if credit_account_id: credit_line = (0, 0, { 'name': line.name, 'partner_id': hr_payslip_line_pool._get_partner_id(cr, uid, line, credit_account=True, context=context), 'account_id': credit_account_id, 'journal_id': slip.journal_id.id, 'date': date, 'debit': amt < 0.0 and -amt or 0.0, 'credit': amt > 0.0 and amt or 0.0, 'analytic_account_id': line.salary_rule_id.analytic_account_id and line.salary_rule_id.analytic_account_id.id or False, 'tax_line_id': line.salary_rule_id.account_tax_id and line.salary_rule_id.account_tax_id.id or False, }) line_ids.append(credit_line) credit_sum += credit_line[2]['credit'] - credit_line[2]['debit'] if float_compare(credit_sum, debit_sum, precision_digits=precision) == -1: acc_id = slip.journal_id.default_credit_account_id.id if not acc_id: raise UserError(_('The Expense Journal "%s" has not properly configured the Credit Account!') % (slip.journal_id.name)) adjust_credit = (0, 0, { 'name': _('Adjustment Entry'), 'partner_id': False, 'account_id': acc_id, 'journal_id': slip.journal_id.id, 'date': date, 'debit': 0.0, 'credit': debit_sum - credit_sum, })<|fim▁hole|> line_ids.append(adjust_credit) elif float_compare(debit_sum, credit_sum, precision_digits=precision) == -1: acc_id = slip.journal_id.default_debit_account_id.id if not acc_id: raise UserError(_('The Expense Journal "%s" has not properly configured the Debit Account!') % (slip.journal_id.name)) adjust_debit = (0, 0, { 'name': _('Adjustment Entry'), 'partner_id': False, 'account_id': acc_id, 'journal_id': slip.journal_id.id, 'date': date, 'debit': credit_sum - debit_sum, 'credit': 0.0, }) line_ids.append(adjust_debit) move.update({'line_ids': line_ids}) move_id = move_pool.create(cr, uid, move, context=context) self.write(cr, uid, [slip.id], {'move_id': move_id, 'date' : date}, context=context) move_pool.post(cr, uid, [move_id], context=context) return super(hr_payslip, self).process_sheet(cr, uid, [slip.id], context=context) class hr_salary_rule(osv.osv): _inherit = 'hr.salary.rule' _columns = { 'analytic_account_id':fields.many2one('account.analytic.account', 'Analytic Account', domain=[('account_type', '=', 'normal')]), 'account_tax_id':fields.many2one('account.tax', 'Tax'), 'account_debit': fields.many2one('account.account', 'Debit Account', domain=[('deprecated', '=', False)]), 'account_credit': fields.many2one('account.account', 'Credit Account', domain=[('deprecated', '=', False)]), } class hr_contract(osv.osv): _inherit = 'hr.contract'
_description = 'Employee Contract' _columns = { 'analytic_account_id':fields.many2one('account.analytic.account', 'Analytic Account', domain=[('account_type', '=', 'normal')]), 'journal_id': fields.many2one('account.journal', 'Salary Journal'), } class hr_payslip_run(osv.osv): _inherit = 'hr.payslip.run' _description = 'Payslip Run' _columns = { 'journal_id': fields.many2one('account.journal', 'Salary Journal', states={'draft': [('readonly', False)]}, readonly=True, required=True), } def _get_default_journal(self, cr, uid, context=None): journal_obj = self.pool.get('account.journal') res = journal_obj.search(cr, uid, [('type', '=', 'general')]) if res: return res[0] return False _defaults = { 'journal_id': _get_default_journal, }<|fim▁end|>
<|file_name|>domain.py<|end_file_name|><|fim▁begin|>import inspect import os from abc import ABC, ABCMeta from dataclasses import dataclass from datetime import datetime, tzinfo from types import FunctionType, WrapperDescriptorType from typing import ( Any, Callable, Dict, Generic, Iterable, List, Optional, Type, TypeVar, Union, cast, ) from uuid import UUID, uuid4 from eventsourcing.utils import get_method_name, get_topic, resolve_topic # noinspection SpellCheckingInspection TZINFO: tzinfo = resolve_topic(os.getenv("TZINFO_TOPIC", "datetime:timezone.utc")) # noinspection PyTypeChecker TAggregate = TypeVar("TAggregate", bound="Aggregate") class MetaDomainEvent(ABCMeta): def __new__(mcs, name: str, bases: tuple, cls_dict: dict) -> "MetaDomainEvent": event_cls = ABCMeta.__new__(mcs, name, bases, cls_dict) event_cls = dataclass(frozen=True)(event_cls) # type: ignore return event_cls def __init__(cls, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class DomainEvent(ABC, metaclass=MetaDomainEvent): # noinspection PyUnresolvedReferences """ Base class for domain events, such as aggregate :class:`AggregateEvent` and aggregate :class:`Snapshot`. Constructor arguments: :param UUID originator_id: ID of originating aggregate. :param int originator_version: version of originating aggregate. :param datetime timestamp: date-time of the event """ originator_id: UUID originator_version: int timestamp: datetime class AggregateEvent(DomainEvent, Generic[TAggregate]): # noinspection PyUnresolvedReferences """ Base class for aggregate events. Subclasses will model decisions made by the domain model aggregates. Constructor arguments: :param UUID originator_id: ID of originating aggregate. :param int originator_version: version of originating aggregate. :param datetime timestamp: date-time of the event """ def mutate(self, obj: Optional[TAggregate]) -> Optional[TAggregate]: """ Changes the state of the aggregate according to domain event attributes. """ # Check event is next in its sequence. # Use counting to follow the sequence. # assert isinstance(obj, Aggregate), (type(obj), self) assert obj is not None next_version = obj.version + 1 if self.originator_version != next_version: raise VersionError(self.originator_version, next_version) if self.apply(obj) is not None: # type: ignore raise TypeError( f"Unexpected value returned from " f"{type(self).apply.__qualname__}(). Values " f"returned from 'apply' methods are discarded." ) # Update the aggregate version. obj.version = self.originator_version # Update the modified time. obj.modified_on = self.timestamp return obj # noinspection PyShadowingNames def apply(self, aggregate: TAggregate) -> None: """ Applies the domain event to the aggregate. """ class AggregateCreated(AggregateEvent["Aggregate"]): # noinspection PyUnresolvedReferences """ Domain event for when aggregate is created. Constructor arguments: :param UUID originator_id: ID of originating aggregate. :param int originator_version: version of originating aggregate. :param datetime timestamp: date-time of the event :param str originator_topic: topic for the aggregate class """ originator_topic: str def mutate(self, obj: Optional[TAggregate]) -> TAggregate: """ Constructs aggregate instance defined by domain event object attributes. """ assert obj is None # Copy the event attributes. kwargs = self.__dict__.copy() # Resolve originator topic. aggregate_class: Type[TAggregate] = resolve_topic( kwargs.pop("originator_topic") ) # Construct and return aggregate object. 
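# --- Editor's illustrative sketch (not part of the dataset rows) ---
# process_sheet() in the hr_payroll_account.py row above accumulates
# debit_sum/credit_sum over the payslip lines and, when the totals differ,
# appends an "Adjustment Entry" line for the difference so the journal entry
# balances. A minimal standalone sketch of that balancing idea, using plain
# dicts and floats; the function and field names here are hypothetical, not
# the Odoo API:

def balance_move_lines(line_ids, adjustment_account_id):
    """Append an adjustment line so total debit equals total credit."""
    debit_sum = sum(line['debit'] for line in line_ids)
    credit_sum = sum(line['credit'] for line in line_ids)
    diff = round(debit_sum - credit_sum, 2)
    if diff > 0:
        # credits fall short: add a credit adjustment
        line_ids.append({'name': 'Adjustment Entry',
                         'account_id': adjustment_account_id,
                         'debit': 0.0, 'credit': diff})
    elif diff < 0:
        # debits fall short: add a debit adjustment
        line_ids.append({'name': 'Adjustment Entry',
                         'account_id': adjustment_account_id,
                         'debit': -diff, 'credit': 0.0})
    return line_ids

# Example: a single 100.00 debit line gets a matching 100.00 credit adjustment.
lines = [{'name': 'Net salary', 'account_id': 1, 'debit': 100.0, 'credit': 0.0}]
balance_move_lines(lines, adjustment_account_id=99)
assert sum(l['debit'] for l in lines) == sum(l['credit'] for l in lines)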
agg: TAggregate = aggregate_class.__new__(aggregate_class) # Separate the base class keywords arguments. base_kwargs = { "id": kwargs.pop("originator_id"), "version": kwargs.pop("originator_version"), "timestamp": kwargs.pop("timestamp"), } # Call the base class init method. Aggregate.__base_init__(agg, **base_kwargs) # Call the aggregate class init method. # noinspection PyTypeChecker init_method = agg.__init__ # type: ignore # Provide the id, if the init method expects it. if aggregate_class._init_mentions_id: kwargs["id"] = base_kwargs["id"] # noinspection PyArgumentList init_method(**kwargs) return agg class CommandMethodDecorator: def __init__(self, arg: Union[Callable, str, Type[AggregateEvent]]): self.is_name_inferred_from_method = False self.given_event_cls: Optional[Type[AggregateEvent]] = None self.event_cls_name: Optional[str] = None self.is_property_setter = False self.property_setter_arg_name: Optional[str] = None self.is_decorating_a_property = False self.decorated_property: Optional[property] = None self.original_method: Optional[FunctionType] = None # Initialising an instance. if isinstance(arg, str): # Decorator used with an explicit name. self.initialise_from_explicit_name(event_cls_name=arg) elif isinstance(arg, type) and issubclass(arg, AggregateEvent): self.initialise_from_event_cls(event_cls=arg) elif isinstance(arg, FunctionType): # Decorator used without explicit name. self.initialise_from_decorated_method(original_method=arg) elif isinstance(arg, property): method_name = arg.fset.__name__ raise TypeError( f"@event on {method_name}() property setter requires event class name" ) elif isinstance(arg, staticmethod): raise TypeError( f"{arg.__func__.__name__}() staticmethod can't be " f"used to update aggregate state" ) elif isinstance(arg, classmethod): # noinspection SpellCheckingInspection raise TypeError( f"{arg.__func__.__name__}() classmethod can't be " f"used to update aggregate state" ) else: raise TypeError(f"Unsupported usage: {type(arg)} is not a str or function") def initialise_from_decorated_method(self, original_method: FunctionType) -> None: self.original_method = original_method original_method_name = original_method.__name__ if original_method_name != "__init__": self.is_name_inferred_from_method = True self.event_cls_name = "".join( [s.capitalize() for s in original_method_name.split("_")] ) _check_no_variable_params(self.original_method) def initialise_from_event_cls(self, event_cls: Type[AggregateEvent]) -> None: self.given_event_cls = event_cls def initialise_from_explicit_name(self, event_cls_name: str) -> None: if event_cls_name == "": raise ValueError("Can't use empty string as name of event class") self.event_cls_name = event_cls_name def __call__(self, *args: Any, **kwargs: Any) -> Any: # Calling an instance. # noinspection SpellCheckingInspection if self.original_method is None: # Decorator doesn't yet know what method is being decorated, # so decorator must have been specified with an explicit # event name or class, so we're still initialising... assert len(kwargs) == 0, "Unsupported usage" assert len(args) == 1, "Unsupported usage" arg = args[0] # assert isinstance(args[0], FunctionType), args[0] if isinstance(arg, FunctionType): # Decorating a function. self.original_method = arg _check_no_variable_params(self.original_method) elif isinstance(arg, property): # Decorating a property. 
self.is_decorating_a_property = True self.decorated_property = arg if arg.fset is None: assert arg.fget is not None method_name = arg.fget.__name__ raise TypeError( f"@event can't decorate {method_name}() property getter" ) assert isinstance(arg.fset, FunctionType) self.original_method = arg.fset assert self.original_method setter_arg_names = list(inspect.signature(arg.fset).parameters) assert len(setter_arg_names) == 2 self.property_setter_arg_name = setter_arg_names[1] _check_no_variable_params(self.original_method) else: raise ValueError( f"Unsupported usage: {type(arg)} is not a str or a FunctionType" ) if self.given_event_cls: if self.given_event_cls in original_methods: name = self.given_event_cls.__name__ raise TypeError( f"{name} event class used in more than one decorator" ) # Set decorated event apply() method on given event class. if "apply" in self.given_event_cls.__dict__: name = self.given_event_cls.__name__ raise TypeError(f"{name} event class has unexpected apply() method") # self.given_event_cls.apply = DecoratedEvent.apply # type: ignore setattr( # noqa: B010 self.given_event_cls, "apply", DecoratedEvent.apply ) # Register the decorated method under the given event class. original_methods[self.given_event_cls] = self.original_method return self else: # Initialised decorator was called directly, presumably by # a decorating property that has this decorator as its fset. # So trigger an event. assert self.is_property_setter assert self.property_setter_arg_name assert len(args) == 2 assert len(kwargs) == 0 assert isinstance(args[0], Aggregate) aggregate_instance = args[0] bound = BoundCommandMethodDecorator(self, aggregate_instance) property_setter_arg_value = args[1] kwargs = {self.property_setter_arg_name: property_setter_arg_value} bound.trigger(**kwargs) def __get__( self, instance: Optional[TAggregate], owner: "MetaAggregate" ) -> Union["BoundCommandMethodDecorator", "UnboundCommandMethodDecorator"]: if self.is_decorating_a_property: assert self.decorated_property return self.decorated_property.__get__(instance, owner) else: if instance is None: return UnboundCommandMethodDecorator(self) else: return BoundCommandMethodDecorator(self, instance) def __set__(self, instance: TAggregate, value: Any) -> None: assert self.is_decorating_a_property # Set decorated property. b = BoundCommandMethodDecorator(self, instance) assert self.property_setter_arg_name kwargs = {self.property_setter_arg_name: value} b.trigger(**kwargs) def event( arg: Optional[Union[FunctionType, str, Type[AggregateEvent]]] = None ) -> CommandMethodDecorator: """ Can be used to decorate an aggregate method so that when the method is called an event is triggered. The body of the method will be used to apply the event to the aggregate, both when the event is triggered and when the aggregate is reconstructed from stored events. .. code-block:: python class MyAggregate(Aggregate): @event("NameChanged") def set_name(self, name: str): self.name = name ...is equivalent to... .. code-block:: python class MyAggregate(Aggregate): def set_name(self, name: str): self.trigger_event(self.NameChanged, name=name) class NameChanged(Aggregate.Event): name: str def apply(self, aggregate): aggregate.name = self.name In the example above, the event "NameChanged" is defined automatically by inspecting the signature of the `set_name()` method. 
If it is preferred to declare the event class explicitly, for example to define upcasting of old events, the event class itself can be mentioned in the event decorator rather than just providing the name of the event as a string. .. code-block:: python class MyAggregate(Aggregate): class NameChanged(Aggregate.Event): name: str @event(NameChanged) def set_name(self, name: str): aggregate.name = self.name """ if arg is None: return event # type: ignore else: return CommandMethodDecorator(arg) triggers = event class UnboundCommandMethodDecorator: """ Wraps an EventDecorator instance when attribute is accessed on an aggregate class. """ # noinspection PyShadowingNames def __init__(self, event_decorator: CommandMethodDecorator): """ :param CommandMethodDecorator event_decorator: """ self.event_decorator = event_decorator assert event_decorator.original_method self.__qualname__ = event_decorator.original_method.__qualname__ self.__name__ = event_decorator.original_method.__name__ class BoundCommandMethodDecorator: """ Wraps an EventDecorator instance when attribute is accessed on an aggregate so that the aggregate methods can be accessed. """ # noinspection PyShadowingNames def __init__(self, event_decorator: CommandMethodDecorator, aggregate: TAggregate): """ :param CommandMethodDecorator event_decorator: :param Aggregate aggregate: """ assert event_decorator.original_method self.event_decorator = event_decorator self.__qualname__ = event_decorator.original_method.__qualname__ self.__name__ = event_decorator.original_method.__name__ self.aggregate = aggregate def trigger(self, *args: Any, **kwargs: Any) -> None: assert isinstance(self.event_decorator, CommandMethodDecorator) # for PyCharm assert self.event_decorator.original_method kwargs = _coerce_args_to_kwargs( self.event_decorator.original_method, args, kwargs ) if self.event_decorator.given_event_cls: event_cls = self.event_decorator.given_event_cls else: assert self.event_decorator.event_cls_name event_cls = getattr(self.aggregate, self.event_decorator.event_cls_name) self.aggregate.trigger_event(event_cls, **kwargs) def __call__(self, *args: Any, **kwargs: Any) -> None: self.trigger(*args, **kwargs) original_methods: Dict[MetaDomainEvent, FunctionType] = {} class DecoratedEvent(AggregateEvent): # noinspection PyShadowingNames def apply(self, aggregate: TAggregate) -> None: """ Applies event to aggregate by calling method decorated by @event. """ event_obj_dict = dict(self.__dict__) event_obj_dict.pop("originator_id") event_obj_dict.pop("originator_version") event_obj_dict.pop("timestamp") original_method = original_methods[type(self)] method_signature = inspect.signature(original_method) # args = [] # for name, param in method_signature.parameters.items(): for name in method_signature.parameters: if name == "self": continue # if param.kind == param.POSITIONAL_ONLY: # args.append(event_obj_dict.pop(name)) # original_method(aggregate, *args, **event_obj_dict) returned_value = original_method(aggregate, **event_obj_dict) if returned_value is not None: raise TypeError( f"Unexpected value returned from " f"{original_method.__qualname__}(). Values " f"returned from 'apply' methods are discarded." 
) TDomainEvent = TypeVar("TDomainEvent", bound=DomainEvent) TAggregateEvent = TypeVar("TAggregateEvent", bound=AggregateEvent) TAggregateCreated = TypeVar("TAggregateCreated", bound=AggregateCreated) def _check_no_variable_params( method: Union[FunctionType, WrapperDescriptorType] ) -> None: assert isinstance(method, (FunctionType, WrapperDescriptorType)), type(method) for param in inspect.signature(method).parameters.values(): if param.kind is param.VAR_POSITIONAL: raise TypeError("variable positional parameters not supported") # Todo: Support VAR_POSITIONAL? # annotations["__star_args__"] = "typing.Any" elif param.kind is param.VAR_KEYWORD: # Todo: Support VAR_KEYWORD? # annotations["__star_kwargs__"] = "typing.Any" raise TypeError("variable keyword parameters not supported") def _coerce_args_to_kwargs( method: Union[FunctionType, WrapperDescriptorType], args: Iterable[Any], kwargs: Dict[str, Any], expects_id: bool = False, ) -> Dict[str, Any]: assert isinstance(method, (FunctionType, WrapperDescriptorType)) method_signature = inspect.signature(method) copy_kwargs = dict(kwargs) args = tuple(args) positional_names = [] keyword_defaults = {} required_positional = [] required_keyword_only = [] if expects_id: positional_names.append("id") required_positional.append("id") for name, param in method_signature.parameters.items(): if name == "self": continue # elif param.kind in (param.POSITIONAL_ONLY, param.POSITIONAL_OR_KEYWORD): if param.kind is param.KEYWORD_ONLY: required_keyword_only.append(name) if param.kind is param.POSITIONAL_OR_KEYWORD: positional_names.append(name) if param.default == param.empty: required_positional.append(name) if param.default != param.empty: keyword_defaults[name] = param.default # if not required_keyword_only and not positional_names: # if args or kwargs: # raise TypeError(f"{method.__name__}() takes no args") for name in kwargs: if name not in required_keyword_only and name not in positional_names: raise TypeError( f"{get_method_name(method)}() got an unexpected " f"keyword argument '{name}'" ) counter = 0 len_args = len(args) if len_args > len(positional_names): msg = ( f"{get_method_name(method)}() takes {len(positional_names) + 1} " f"positional argument{'' if len(positional_names) + 1 == 1 else 's'} " f"but {len_args + 1} were given" ) raise TypeError(msg) required_positional_not_in_kwargs = [ n for n in required_positional if n not in kwargs ] num_missing = len(required_positional_not_in_kwargs) - len_args if num_missing > 0: missing_names = [ f"'{name}'" for name in required_positional_not_in_kwargs[len_args:] ] msg = ( f"{get_method_name(method)}() missing {num_missing} required positional " f"argument{'' if num_missing == 1 else 's'}: " ) raise_missing_names_type_error(missing_names, msg) for name in positional_names: if counter + 1 > len_args: break if name not in kwargs: copy_kwargs[name] = args[counter] counter += 1 else: raise TypeError( f"{get_method_name(method)}() got multiple values for argument '{name}'" ) missing_keyword_only_arguments = [] for name in required_keyword_only: if name not in kwargs: missing_keyword_only_arguments.append(name) if missing_keyword_only_arguments: missing_names = [f"'{name}'" for name in missing_keyword_only_arguments] msg = ( f"{get_method_name(method)}() missing {len(missing_names)} " f"required keyword-only argument" f"{'' if len(missing_names) == 1 else 's'}: " ) raise_missing_names_type_error(missing_names, msg) for name, value in keyword_defaults.items(): if name not in copy_kwargs: copy_kwargs[name] = value 
return copy_kwargs def raise_missing_names_type_error(missing_names: List[str], msg: str) -> None: msg += missing_names[0] if len(missing_names) == 2: msg += f" and {missing_names[1]}" elif len(missing_names) > 2: msg += ", " + ", ".join(missing_names[1:-1]) msg += f", and {missing_names[-1]}" raise TypeError(msg) class MetaAggregate(ABCMeta): _annotations_mention_id = False _init_mentions_id = False INITIAL_VERSION = 1 def __new__(mcs, *args: Any, **kwargs: Any) -> "MetaAggregate": try: args[2]["__annotations__"].pop("id") except KeyError: pass else: args[2]["_annotations_mention_id"] = True cls = ABCMeta.__new__(mcs, *args) cls = dataclass(eq=False, repr=False)(cls) return cast(MetaAggregate, cls) def __init__( cls, *args: Any, created_event_name: Optional[str] = None, ) -> None: super().__init__(*args) # Prepare created event class. created_event_classes = {} try: created_event_class = cls.__dict__["_created_event_class"] if created_event_name: raise TypeError( "Can't use both '_created_event_class' and 'created_event_name'" ) except KeyError: created_event_class = None if isinstance(cls.__dict__["__init__"], CommandMethodDecorator): init_decorator: CommandMethodDecorator = cls.__dict__["__init__"] init_method = init_decorator.original_method if created_event_name: raise TypeError( "Can't use both 'created_event_name' and __init__ @event decorator" ) elif created_event_class: raise TypeError( "Can't use both '_created_event_class' and __init__ @event " "decorator" ) elif init_decorator.event_cls_name: created_event_name = init_decorator.event_cls_name elif init_decorator.given_event_cls: created_event_class = init_decorator.given_event_cls else: raise TypeError( "Neither name nor class given to __init__ @event decorator" ) cls.__init__ = init_method # type: ignore else: init_method = cls.__dict__["__init__"] assert isinstance(init_method, FunctionType) for name, value in tuple(cls.__dict__.items()): if isinstance(value, type) and issubclass(value, AggregateCreated): created_event_classes[name] = value # Use the class as the created class, if so named. if created_event_name in created_event_classes: created_event_class = created_event_classes[created_event_name] elif created_event_class is None: if len(created_event_classes) == 0 or created_event_name: if not created_event_name: created_event_name = "Created" # Define a "created" event for this class. created_cls_annotations = {} _check_no_variable_params(init_method) method_signature = inspect.signature(init_method) for param_name in method_signature.parameters: if param_name == "self": continue if param_name == "id": cls._init_mentions_id = True continue created_cls_annotations[param_name] = "typing.Any" created_event_class = type( created_event_name, (AggregateCreated,), { "__annotations__": created_cls_annotations, "__module__": cls.__module__, "__qualname__": ".".join( [cls.__qualname__, created_event_name] ), }, ) setattr(cls, created_event_name, created_event_class) elif len(created_event_classes) == 1: created_event_class = list(created_event_classes.values())[0] cls._created_event_class = created_event_class # Prepare the subsequent event classes. for attribute in tuple(cls.__dict__.values()): # Watch out for @property that sits over an @event. if isinstance(attribute, property) and isinstance( attribute.fset, CommandMethodDecorator ): attribute = attribute.fset if attribute.is_name_inferred_from_method: # We don't want name inferred from property (not past participle). 
method_name = attribute.original_method.__name__ raise TypeError( f"@event under {method_name}() property setter requires event " f"class name" ) # Attribute is a property decorating an event decorator. attribute.is_property_setter = True # Attribute is an event decorator. if isinstance(attribute, CommandMethodDecorator): # Prepare the subsequent aggregate events. original_method = attribute.original_method assert isinstance(original_method, FunctionType) method_signature = inspect.signature(original_method) annotations = {} for param_name in method_signature.parameters: if param_name == "self": continue elif attribute.is_property_setter: assert len(method_signature.parameters) == 2 attribute.property_setter_arg_name = param_name annotations[param_name] = "typing.Any" # Todo: Improve this? if not attribute.given_event_cls: assert attribute.event_cls_name event_cls_name = attribute.event_cls_name # Check event class isn't already defined. if event_cls_name in cls.__dict__: raise TypeError( f"{event_cls_name} event already defined on {cls.__name__}" ) event_cls_qualname = ".".join([cls.__qualname__, event_cls_name]) event_cls_dict = { "__annotations__": annotations, "__module__": cls.__module__, "__qualname__": event_cls_qualname, } event_cls = MetaDomainEvent( event_cls_name, (DecoratedEvent,), event_cls_dict ) original_methods[event_cls] = original_method setattr(cls, event_cls_name, event_cls) # Inspect the parameters of the create_id method. cls._create_id_param_names = [] for name, param in inspect.signature(cls.create_id).parameters.items(): if param.kind in [param.KEYWORD_ONLY, param.POSITIONAL_OR_KEYWORD]: cls._create_id_param_names.append(name) def __call__(cls: "MetaAggregate", *args: Any, **kwargs: Any) -> TAggregate: # noinspection PyTypeChecker self_init: WrapperDescriptorType = cls.__init__ # type: ignore kwargs = _coerce_args_to_kwargs( self_init, args, kwargs, expects_id=cls._annotations_mention_id ) if cls._created_event_class is None: raise TypeError("attribute '_created_event_class' not set on class") else: new_aggregate: TAggregate = cls._create( event_class=cls._created_event_class, # id=id, **kwargs, ) return new_aggregate # noinspection PyUnusedLocal @staticmethod def create_id(**kwargs: Any) -> UUID: """ Returns a new aggregate ID. """ return uuid4() # noinspection PyShadowingBuiltins def _create( cls, event_class: Type[TAggregateCreated], *, id: Optional[UUID] = None, **kwargs: Any, ) -> TAggregate: """ Factory method to construct a new aggregate object instance. """ # Construct the domain event class, # with an ID and version, and the # a topic for the aggregate class. create_id_kwargs = { k: v for k, v in kwargs.items() if k in cls._create_id_param_names } try: created_event: TAggregateCreated = event_class( # type: ignore originator_topic=get_topic(cls), originator_id=id or cls.create_id(**create_id_kwargs), originator_version=cls.INITIAL_VERSION, timestamp=datetime.now(tz=TZINFO), **kwargs, ) except TypeError as e: msg = ( f"Unable to construct 'aggregate created' " f"event with class {event_class.__qualname__} " f"and keyword args {kwargs}: {e}" ) raise TypeError(msg) # Construct the aggregate object. agg: TAggregate = created_event.mutate(None) # Append the domain event to pending list. agg.pending_events.append(created_event) # Return the aggregate. return agg class Aggregate(ABC, metaclass=MetaAggregate): """ Base class for aggregate roots. 
""" class Event(AggregateEvent): pass class Created(AggregateCreated): pass def __new__(cls, *args: Any, **kwargs: Any) -> Any: return object.__new__(cls) def __eq__(self, other: Any) -> bool: return type(self) == type(other) and self.__dict__ == other.__dict__ def __repr__(self) -> str: attrs = [ f"{k.lstrip('_')}={v!r}" for k, v in self.__dict__.items() if k != "_pending_events" ] return f"{type(self).__name__}({', '.join(attrs)})" # noinspection PyShadowingBuiltins def __base_init__(self, id: UUID, version: int, timestamp: datetime) -> None: """ Initialises an aggregate object with an :data:`id`, a :data:`version` number, and a :data:`timestamp`. The internal :data:`pending_events` list is also initialised. """ self._id = id self._version = version self._created_on = timestamp self._modified_on = timestamp self._pending_events: List[AggregateEvent] = [] @property def id(self) -> UUID: """ The ID of the aggregate. """ return self._id @property def version(self) -> int: """ The version number of the aggregate. """ return self._version <|fim▁hole|> @property def created_on(self) -> datetime: """ The date and time when the aggregate was created. """ return self._created_on @property def modified_on(self) -> datetime: """ The date and time when the aggregate was last modified. """ return self._modified_on @modified_on.setter def modified_on(self, modified_on: datetime) -> None: self._modified_on = modified_on @property def pending_events(self) -> List[AggregateEvent]: """ A list of pending events. """ return self._pending_events def trigger_event( self, event_class: Type[TAggregateEvent], **kwargs: Any, ) -> None: """ Triggers domain event of given type, by creating an event object and using it to mutate the aggregate. """ # Construct the domain event as the # next in the aggregate's sequence. # Use counting to generate the sequence. next_version = self.version + 1 try: new_event = event_class( # type: ignore originator_id=self.id, originator_version=next_version, timestamp=datetime.now(tz=TZINFO), **kwargs, ) except TypeError as e: raise TypeError(f"Can't construct event {event_class}: {e}") # Mutate aggregate with domain event. new_event.mutate(self) # Append the domain event to pending list. self.pending_events.append(new_event) def collect_events(self) -> List[AggregateEvent]: """ Collects and returns a list of pending aggregate :class:`AggregateEvent` objects. """ collected = [] while self.pending_events: collected.append(self.pending_events.pop(0)) return collected def aggregate( cls: Optional[MetaAggregate] = None, *, created_event_name: Optional[str] = None ) -> Union[MetaAggregate, Callable]: """ Converts the class that was passed in to inherit from Aggregate. .. code-block:: python @aggregate class MyAggregate: pass ...is equivalent to... .. code-block:: python class MyAggregate(Aggregate): pass """ def decorator(cls: Any) -> MetaAggregate: if issubclass(cls, Aggregate): raise TypeError(f"{cls.__name__} is already an Aggregate") bases = cls.__bases__ if bases == (object,): bases = (Aggregate,) else: bases += (Aggregate,) return MetaAggregate( cls.__name__, bases, dict(cls.__dict__), created_event_name=created_event_name, ) if cls: return decorator(cls) else: return decorator class VersionError(Exception): """ Raised when a domain event can't be applied to an aggregate due to version mismatch indicating the domain event is not the next in the aggregate's sequence of events. 
""" class Snapshot(DomainEvent): # noinspection PyUnresolvedReferences """ Snapshots represent the state of an aggregate at a particular version. Constructor arguments: :param UUID originator_id: ID of originating aggregate. :param int originator_version: version of originating aggregate. :param datetime timestamp: date-time of the event :param str topic: string that includes a class and its module :param dict state: version of originating aggregate. """ topic: str state: dict # noinspection PyShadowingNames @classmethod def take(cls, aggregate: TAggregate) -> "Snapshot": """ Creates a snapshot of the given :class:`Aggregate` object. """ aggregate_state = dict(aggregate.__dict__) aggregate_state.pop("_pending_events") class_version = getattr(type(aggregate), "class_version", 1) if class_version > 1: aggregate_state["class_version"] = class_version originator_id = aggregate_state.pop("_id") originator_version = aggregate_state.pop("_version") # noinspection PyArgumentList return cls( # type: ignore originator_id=originator_id, originator_version=originator_version, timestamp=datetime.now(tz=TZINFO), topic=get_topic(type(aggregate)), state=aggregate_state, ) def mutate(self, _: None = None) -> TAggregate: """ Reconstructs the snapshotted :class:`Aggregate` object. """ cls = resolve_topic(self.topic) assert issubclass(cls, Aggregate) aggregate_state = dict(self.state) from_version = aggregate_state.pop("class_version", 1) class_version = getattr(cls, "class_version", 1) while from_version < class_version: upcast_name = f"upcast_v{from_version}_v{from_version + 1}" upcast = getattr(cls, upcast_name) upcast(aggregate_state) from_version += 1 aggregate_state["_id"] = self.originator_id aggregate_state["_version"] = self.originator_version aggregate_state["_pending_events"] = [] # noinspection PyShadowingNames aggregate = object.__new__(cls) aggregate.__dict__.update(aggregate_state) return aggregate<|fim▁end|>
@version.setter def version(self, version: int) -> None: self._version = version
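# --- Editor's illustrative sketch (not part of the dataset rows) ---
# A short usage sketch of the Aggregate base class and the @event decorator
# from the domain.py row above, assembled from the examples in its own
# docstrings. It assumes the module is importable as eventsourcing.domain
# (matching the eventsourcing.utils import at the top of that file); the
# World/make_it_so names are illustrative.

from eventsourcing.domain import Aggregate, event

class World(Aggregate):
    def __init__(self):
        self.history = []

    @event('SomethingHappened')
    def make_it_so(self, what):
        # the body runs both when the event is triggered and when the
        # aggregate is reconstructed from stored events
        self.history.append(what)

world = World()
world.make_it_so('trucks')
assert world.history == ['trucks']

# collect_events() drains the pending list: one auto-defined Created event
# plus one SomethingHappened event.
events = world.collect_events()
assert len(events) == 2
assert world.pending_events == []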
<|file_name|>manage.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python import os import sys <|fim▁hole|> os.environ["DJANGO_SETTINGS_MODULE"] = "tests.settings" test_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, test_dir) except ImportError: pass else: from django.core.management import execute_from_command_line def main(): os.environ.setdefault("DJANGO_SETTINGS_MODULE", "%s.settings" % __package__) execute_from_command_line(sys.argv) if __name__ == "__main__": main()<|fim▁end|>
try:
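# --- Editor's note on the row format (illustrative) ---
# Each row above is a fill-in-the-middle pair: the prompt carries the file
# name plus the code before (<|fim▁begin|> ... <|fim▁hole|>) and after
# (... <|fim▁end|>) the hole, and the completion is the missing middle. For
# this manage.py row the completion is the single token "try:", so
# prefix + completion + suffix reassemble the script roughly as below; the
# indentation is inferred, since the dataset rows are whitespace-flattened.

#!/usr/bin/env python
import os
import sys

try:
    os.environ["DJANGO_SETTINGS_MODULE"] = "tests.settings"
    test_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    sys.path.insert(0, test_dir)
except ImportError:
    pass
else:
    from django.core.management import execute_from_command_line

def main():
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "%s.settings" % __package__)
    execute_from_command_line(sys.argv)

if __name__ == "__main__":
    main()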
<|file_name|>test_align.py<|end_file_name|><|fim▁begin|>import os.path import unittest from unittest.mock import patch import libpipe from libpipe.cmds.align import HisatCmd import logging log = logging.getLogger(__name__) class TestHistatCmd(unittest.TestCase): def setUp(self): # prevent error logs from occurring during testing patcher = patch.object(libpipe.cmds.base.log, 'error') patcher.start() self.addCleanup(patcher.stop)<|fim▁hole|> # override base cmd method patcher = patch.object(libpipe.cmds.base.BaseCmd, '_cmd') patcher.start() self.addCleanup(patcher.stop) def sample_cmd(self): kw = { '-U': 'upath/seq.fa', '-x': 'gpath/gen', 'timestamp': '000', '-S': 'path/al.sam', } return HisatCmd(**kw) # # Test _prepcmd # def test_prepcmd_sets_S_if_not_given(self): hc = self.sample_cmd() del hc.kwargs['-S'] hc._prepcmd() self.assertEqual( hc.kwargs['-S'], 'upath/seq_gen.sam', ) def test_prepcmd_sets_redirect_to_log_file(self): hc = self.sample_cmd() hc._prepcmd() self.assertTrue( hc.redirect.endswith('path/al_gen_000_hisat.log'), 'Redirect not set to expected log file ({})'.format(hc.redirect), ) def test_prepcmd_sets_redirect_for_stdout_and_stderr_to_tee(self): hc = self.sample_cmd() hc._prepcmd() self.assertTrue( hc.redirect.startswith('2>&1 | tee -a'), 'Redirect not set properly: {}'.format(hc.redirect), ) def test_prepcmd_sets_unal_based_on_given_samfile_name(self): hc = self.sample_cmd() hc._prepcmd() expected_file = os.path.splitext(hc.kwargs['-S'])[0] + '.unal.fastq' self.assertIn('--un', hc.kwargs) self.assertEqual(hc.kwargs['--un'], expected_file) # # Test cmd # def test_cmd_raises_AttributeError_if_only_one_ppe_given(self): hc = self.sample_cmd() hc.kwargs['-1'] = hc.kwargs['-U'] del hc.kwargs['-U'] with self.assertRaises(AttributeError): hc.cmd() def test_addreq_raises_FileNotFoundError_if_n_idx_ne_expected(self): with patch('remsci.lib.utility.path.walk_file') as m: for i in [0, 100]: with self.subTest(n_indx=i): m.return_value = [0] * i hc = self.sample_cmd() with self.assertRaises(FileNotFoundError): hc._additional_requirements() # # Test _prepreq # def test_prepreq_raises_TypeError_if_linked_input_not_used(self): with patch.object( HisatCmd, 'output', autospec=True, return_value=['seq.txt']): ohc = self.sample_cmd() ihc = self.sample_cmd() ohc.link(ihc) with self.assertRaises(TypeError): ihc._prepreq() def test_prepreq_sets_single_link_input_to_U_kwarg(self): with patch.object(HisatCmd, 'output', return_value=['seq.fq']): ohc = self.sample_cmd() ihc = self.sample_cmd() ohc.link(ihc) ihc._prepreq() self.assertEqual(ihc.kwargs['-U'], 'seq.fq') def test_prepreq_sets_double_link_input_to_1_and_2_kwarg(self): args = ['seq.1.fq', 'seq.2.fq'] with patch.object(HisatCmd, 'output', return_value=args): ohc = self.sample_cmd() ihc = self.sample_cmd() ohc.link(ihc) ihc._prepreq() self.assertEqual(ihc.kwargs['-1'], 'seq.1.fq') self.assertEqual(ihc.kwargs['-2'], 'seq.2.fq') def test_prepreq_preserves_kwargs_if_no_input_given(self): ihc = self.sample_cmd() ihc._prepreq() self.assertEqual(ihc.kwargs['-U'], 'upath/seq.fa') if __name__ == '__main__': unittest.main()<|fim▁end|>
<|file_name|>arar_figure.py<|end_file_name|><|fim▁begin|># =============================================================================== # Copyright 2013 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== import math # ============= enthought library imports ======================= from chaco.array_data_source import ArrayDataSource from chaco.tools.broadcaster import BroadcasterTool from chaco.tools.data_label_tool import DataLabelTool from numpy import Inf, vstack, zeros_like, ma from traits.api import HasTraits, Any, Int, Str, Property, \ Event, cached_property, List, Float, Instance, TraitError from uncertainties import std_dev, nominal_value, ufloat from pychron.core.filtering import filter_ufloats, sigma_filter from pychron.core.helpers.formatting import floatfmt, format_percent_error, standard_sigfigsfmt from pychron.graph.error_bar_overlay import ErrorBarOverlay from pychron.graph.ticks import SparseLogTicks from pychron.graph.ticks import SparseTicks from pychron.graph.tools.analysis_inspector import AnalysisPointInspector from pychron.graph.tools.point_inspector import PointInspectorOverlay from pychron.graph.tools.rect_selection_tool import RectSelectionOverlay, \ RectSelectionTool from pychron.pipeline.plot.flow_label import FlowDataLabel, FlowPlotLabel from pychron.pipeline.plot.overlays.points_label_overlay import PointsLabelOverlay from pychron.pipeline.plot.point_move_tool import OverlayMoveTool from pychron.processing.analyses.analysis_group import AnalysisGroup from pychron.pychron_constants import PLUSMINUS, format_mswd class SelectionFigure(HasTraits): graph = Any def _set_selected(self, ans, sel): for i, a in enumerate(ans): if i in sel: a.set_temp_status(a.otemp_status if a.otemp_status else 'omit') else: a.set_temp_status('ok') def _filter_metadata_changes(self, obj, ans, func=None): sel = obj.metadata.get('selections', []) self._set_selected(ans, sel) if func: func(sel) return sel class BaseArArFigure(SelectionFigure): analyses = Any sorted_analyses = Property(depends_on='analyses') analysis_group = Property(depends_on='analyses, _analysis_group') _analysis_group = Instance(AnalysisGroup) _analysis_group_klass = AnalysisGroup group_id = Int ytitle = Str title = Str xtitle = Str replot_needed = Event recalculate_event = Event options = Any refresh_unknowns_table = Event suppress_ylimits_update = False suppress_xlimits_update = False xpad = None ymas = List ymis = List xmi = Float xma = Float data_xma = 0 _has_formatting_hash = None _reverse_sorted_analyses = False def get_update_dict(self): return {} def build(self, plots, plot_dict=None): """ make plots """ graph = self.graph vertical_resize = not all([p.height for p in plots]) graph.vertical_resize = vertical_resize graph.clear_has_title() title = self.title if not title: title = self.options.title for i, po in enumerate(plots): kw = {'ytitle': po.name} if plot_dict: kw.update(plot_dict) if po.height: kw['bounds'] = [50, po.height] if i == 
(len(plots) - 1): kw['title'] = title if i == 0 and self.ytitle: kw['ytitle'] = self.ytitle if not po.ytitle_visible: kw['ytitle'] = '' if self.xtitle: kw['xtitle'] = self.xtitle kw['padding'] = self.options.get_paddings() p = graph.new_plot(**kw) if i == (len(plots) - 1): p.title_font = self.options.title_font # set a tag for easy identification p.y_axis.tag = po.name self._setup_plot(i, p, po) def post_make(self): self._fix_log_axes() def post_plot(self, plots): graph = self.graph for (plotobj, po) in zip(graph.plots, plots): self._apply_aux_plot_options(plotobj, po) def plot(self, *args, **kw): pass def replot(self, *args, **kw): if self.options: self.plot(self.options.get_plotable_aux_plots()) def max_x(self, *args): return -Inf def min_x(self, *args): return Inf def mean_x(self, *args): return 0 # private def _fix_log_axes(self): for i, p in enumerate(self.graph.plots): if p.value_scale == 'log': if p.value_mapper.range.low < 0: ys = self.graph.get_data(plotid=i, axis=1) m = 10 ** math.floor(math.log10(min(ys))) p.value_mapper.range.low = m def _setup_plot(self, i, pp, po): # add limit tools self.graph.add_limit_tool(pp, 'x', self._handle_xlimits) self.graph.add_limit_tool(pp, 'y', self._handle_ylimits) self.graph.add_axis_tool(pp, pp.x_axis) self.graph.add_axis_tool(pp, pp.y_axis) pp.value_range.on_trait_change(lambda: self.update_options_limits(i), 'updated') pp.index_range.on_trait_change(lambda: self.update_options_limits(i), 'updated') pp.value_range.tight_bounds = False self._apply_aux_plot_options(pp, po) def _apply_aux_plot_options(self, pp, po): options = self.options for k, axis in (('x', pp.x_axis), ('y', pp.y_axis)): for attr in ('title_font', 'tick_in', 'tick_out', 'tick_label_formatter'): value = getattr(options, '{}{}'.format(k, attr)) try: setattr(axis, attr, value) except TraitError: pass axis.tick_label_font = getattr(options, '{}tick_font'.format(k)) # pp.x_axis.title_font = options.xtitle_font # pp.x_axis.tick_label_font = options.xtick_font # pp.x_axis.tick_in = options.xtick_in # pp.x_axis.tick_out = options.xtick_out # # pp.y_axis.title_font = options.ytitle_font # pp.y_axis.tick_label_font = options.ytick_font # pp.y_axis.tick_in = options.ytick_in # pp.y_axis.tick_out = options.ytick_out pp.bgcolor = options.plot_bgcolor pp.x_grid.visible = options.use_xgrid pp.y_grid.visible = options.use_ygrid if po: if not po.ytick_visible: pp.y_axis.tick_visible = False pp.y_axis.tick_label_formatter = lambda x: '' if po.y_axis_right: pp.y_axis.orientation = 'right' pp.y_axis.axis_line_visible = False pp.value_scale = po.scale if po.scale == 'log': if po.use_sparse_yticks: st = SparseLogTicks(step=po.sparse_yticks_step) pp.value_axis.tick_generator = st pp.value_grid.tick_generator = st else: pp.value_axis.tick_interval = po.ytick_interval if po.use_sparse_yticks: st = SparseTicks(step=po.sparse_yticks_step) pp.value_axis.tick_generator = st pp.value_grid.tick_generator = st def _set_options_format(self, pp): # print 'using options format' pass def _set_selected(self, ans, sel): super(BaseArArFigure, self)._set_selected(ans, sel) self.refresh_unknowns_table = True def _cmp_analyses(self, x): return x.timestamp or 0 def _unpack_attr(self, attr, scalar=1, exclude_omit=False, nonsorted=False, ans=None): if ans is None: ans = self.sorted_analyses if nonsorted: ans = self.analyses def gen(): for ai in ans: if exclude_omit and ai.is_omitted(): continue v = ai.get_value(attr) if v is None: v = ufloat(0, 0) yield v * scalar return gen() def _set_y_limits(self, a, b, min_=None, 
max_=None, pid=0, pad=None): mi, ma = self.graph.get_y_limits(plotid=pid) mi = min_ if min_ is not None else min(mi, a) ma = max_ if max_ is not None else max(ma, b) self.graph.set_y_limits(min_=mi, max_=ma, pad=pad, plotid=pid, pad_style='upper') def update_options_limits(self, pid): if not self.suppress_xlimits_update: if hasattr(self.options, 'aux_plots'): # n = len(self.options.aux_plots) xlimits = self.graph.get_x_limits(pid) for ap in self.options.aux_plots: ap.xlimits = xlimits if not self.suppress_ylimits_update: if hasattr(self.options, 'aux_plots'): # n = len(self.options.aux_plots) ylimits = self.graph.get_y_limits(pid) for i, ap in enumerate(self.options.get_plotable_aux_plots()): if i == pid:<|fim▁hole|> # for ap in self.options.aux_plots: # ap.ylimits = ylimits # ap = self.options.aux_plots[n - pid - 1] # if not self.suppress_ylimits_update: # ap.ylimits = self.graph.get_y_limits(pid) # if not self.suppress_xlimits_update: # ap.xlimits = self.graph.get_x_limits(pid) # print('asdfpasdf', id(self.options), id(ap), ap.xlimits) def get_valid_xbounds(self): pass # =========================================================================== # aux plots # =========================================================================== def _do_aux_plot_filtering(self, scatter, po, vs, es): omits, invalids, outliers = [], [], [] if po.filter_str: omits, invalids, outliers = self._get_aux_plot_filtered(po, vs, es) for idx, item in enumerate(self.sorted_analyses): if idx in omits: s = 'omit' elif idx in invalids: s = 'invalid' elif idx in outliers: s = 'outlier' else: s = 'ok' item.set_temp_status(s) return omits, invalids, outliers def _get_aux_plot_filtered(self, po, vs, es=None): omits = [] invalids = [] outliers = [] fs = po.filter_str nsigma = po.sigma_filter_n if fs or nsigma: if es is None: es = zeros_like(vs) ufs = vstack((vs, es)).T filter_str_idx = None if fs: filter_str_idx = filter_ufloats(ufs, fs) ftag = po.filter_str_tag.lower() if ftag == 'invalid': invalids.extend(filter_str_idx) elif ftag == 'outlier': outliers.extend(filter_str_idx) else: omits.extend(filter_str_idx) if nsigma: vs = ma.array(vs, mask=False) if filter_str_idx is not None: vs.mask[filter_str_idx] = True sigma_idx = sigma_filter(vs, nsigma) stag = po.sigma_filter_tag.lower() if stag == 'invalid': invalids.extend(sigma_idx) elif stag == 'outlier': outliers.extend(sigma_idx) else: omits.extend(sigma_idx) return omits, invalids, outliers def _plot_raw_40_36(self, po, pid): k = 'uAr40/Ar36' return self._plot_aux('noncor. 
<sup>40</sup>Ar/<sup>36</sup>Ar', k, po, pid) def _plot_ic_40_36(self, po, pobj, pid): k = 'Ar40/Ar36' return self._plot_aux('<sup>40</sup>Ar/<sup>36</sup>Ar', k, po, pid) def _plot_icf_40_36(self, po, pobj, pid): k = 'icf_40_36' return self._plot_aux('ifc <sup>40</sup>Ar/<sup>36</sup>Ar', k, po, pid) def _plot_radiogenic_yield(self, po, pobj, pid): k = 'radiogenic_yield' return self._plot_aux('%<sup>40</sup>Ar*', k, po, pid) def _plot_kcl(self, po, pobj, pid): k = 'kcl' return self._plot_aux('K/Cl', k, po, pid) def _plot_kca(self, po, pobj, pid): k = 'kca' return self._plot_aux('K/Ca', k, po, pid) def _plot_signal_k39(self, po, pobj, pid): k = 'k39' return self._plot_aux('<sup>39</sup>Ar<sub>K</sub>(fA)', k, po, pid) def _plot_moles_k39(self, po, pobj, pid): k = 'moles_k39' return self._plot_aux('<sup>39</sup>Ar<sub>K</sub>(mol)', k, po, pid) def _plot_moles_ar40(self, po, pobj, pid): k = 'Ar40' return self._plot_aux('<sup>40</sup>Ar<sub>tot</sub>(fA)', k, po, pid) def _plot_moles_ar36(self, po, pobj, pid): k = 'Ar36' return self._plot_aux('<sup>36</sup>Ar<sub>tot</sub>(fA)', k, po, pid) def _plot_extract_value(self, po, pobj, pid): k = 'extract_value' return self._plot_aux('Extract Value', k, po, pid) def _get_aux_plot_data(self, k, scalar=1): vs = list(self._unpack_attr(k, scalar=scalar)) return [nominal_value(vi) for vi in vs], [std_dev(vi) for vi in vs] def _handle_ylimits(self): pass def _handle_xlimits(self): pass def _add_point_labels(self, scatter, ans=None): labels = [] f = self.options.analysis_label_format if not f: f = '{aliquot:02d}{step:}' if ans is None: ans = self.sorted_analyses labels = [f.format(aliquot=si.aliquot, step=si.step, sample=si.sample, name=si.name, label_name=si.label_name, runid=si.record_id) for si in ans] font = self.options.label_font ov = PointsLabelOverlay(component=scatter, labels=labels, label_box=self.options.label_box, font=font) scatter.underlays.append(ov) def _add_error_bars(self, scatter, errors, axis, nsigma, end_caps=True, visible=True): ebo = ErrorBarOverlay(component=scatter, orientation=axis, nsigma=nsigma, visible=visible, use_end_caps=end_caps) scatter.underlays.append(ebo) setattr(scatter, '{}error'.format(axis), ArrayDataSource(errors)) return ebo def _add_scatter_inspector(self, scatter, inspector=None, add_tool=True, add_selection=True, value_format=None, additional_info=None, index_tag=None, index_attr=None, convert_index=None, items=None, update_meta_func=None): if add_tool: broadcaster = BroadcasterTool() scatter.tools.append(broadcaster) if add_selection: rect_tool = RectSelectionTool(scatter) rect_overlay = RectSelectionOverlay(component=scatter, tool=rect_tool) scatter.overlays.append(rect_overlay) broadcaster.tools.append(rect_tool) if inspector is None: if value_format is None: def value_format(x): return '{:0.5f}'.format(x) if convert_index is None: def convert_index(x): return '{:0.3f}'.format(x) if items is None: items = self.sorted_analyses inspector = AnalysisPointInspector(scatter, use_pane=False, analyses=items, convert_index=convert_index, index_tag=index_tag, index_attr=index_attr, value_format=value_format, additional_info=additional_info) pinspector_overlay = PointInspectorOverlay(component=scatter, tool=inspector) scatter.overlays.append(pinspector_overlay) broadcaster.tools.append(inspector) else: if not isinstance(inspector, (list, tuple)): inspector = (inspector,) for i in inspector: broadcaster.tools.append(i) # # pinspector_overlay = PointInspectorOverlay(component=scatter, # # tool=point_inspector) # # print 
'fff', inspector # # event_queue = {} # for i in inspector: # i.event_queue = event_queue # i.on_trait_change(self._handle_inspection, 'inspector_item') # # scatter.overlays.append(pinspector_overlay) # broadcaster.tools.append(i) if update_meta_func is None: update_meta_func = self.update_graph_metadata # u = lambda a, b, c, d: self.update_graph_metadata(a, b, c, d) scatter.index.on_trait_change(update_meta_func, 'metadata_changed') def update_graph_metadata(self, obj, name, old, new): pass # =============================================================================== # labels # =============================================================================== def _add_info_label(self, plot, text_lines, font=None): if font is None: font = self.options.error_info_font ov = FlowPlotLabel(text='\n'.join(text_lines), overlay_position='inside top', hjustify='left', bgcolor=plot.bgcolor, font=font, component=plot) plot.overlays.append(ov) plot.tools.append(OverlayMoveTool(component=ov)) def _add_data_label(self, s, text, point, bgcolor='transparent', label_position='top right', color=None, append=True, **kw): if color is None: color = s.color label = FlowDataLabel(component=s, data_point=point, label_position=label_position, label_text=text, border_visible=False, bgcolor=bgcolor, show_label_coords=False, marker_visible=False, text_color=color, # setting the arrow to visible causes an error when reading with illustrator # if the arrow is not drawn arrow_visible=False, **kw) s.overlays.append(label) tool = DataLabelTool(label) if append: label.tools.append(tool) else: label.tools.insert(0, tool) label.on_trait_change(self._handle_overlay_move, 'label_position') return label def _build_label_text(self, x, we, n, total_n=None, mswd_args=None, display_n=True, display_mswd=True, display_mswd_pvalue=False, percent_error=False, sig_figs=3, mswd_sig_figs=3): display_mswd = n >= 2 and display_mswd if display_n: if total_n and n != total_n: n = 'n= {}/{}'.format(n, total_n) else: n = 'n= {}'.format(n) else: n = '' if mswd_args and display_mswd: mswd, valid_mswd, _, pvalue = mswd_args mswd = format_mswd(mswd, valid_mswd, n=mswd_sig_figs, include_tag=True) if display_mswd_pvalue: mswd = '{} pvalue={:0.2f}'.format(mswd, pvalue) else: mswd = '' if sig_figs == 'Std': sx, swe = standard_sigfigsfmt(x, we) else: sx = floatfmt(x, sig_figs) swe = floatfmt(we, sig_figs) if self.options.index_attr in ('uF', 'Ar40/Ar36'): me = u'{} {}{}'.format(sx, PLUSMINUS, swe) else: age_units = self._get_age_units() pe = '' if percent_error: pe = '({})'.format(format_percent_error(x, we, include_percent_sign=True)) me = u'{} {}{}{} {}'.format(sx, PLUSMINUS, swe, pe, age_units) return u'{} {} {}'.format(me, mswd, n) def _get_age_units(self): a = 'Ma' if self.analyses: a = self.analyses[0].arar_constants.age_units return a def _set_renderer_selection(self, rs, sel): meta = {'selections': sel} for rend in rs: rend.index.trait_set(metadata=meta) def _handle_label_move(self, obj, name, old, new): axps = [a for a in self.options.aux_plots if a.plot_enabled][::-1] for i, p in enumerate(self.graph.plots): if next((pp for pp in p.plots.values() if obj.component == pp[0]), None): axp = axps[i] if hasattr(new, '__iter__'): new = [float(ni) for ni in new] else: new = float(new) axp.set_overlay_position(obj.id, new) def _handle_overlay_move(self, obj, name, old, new): axps = [a for a in self.options.aux_plots if a.plot_enabled][::-1] for i, p in enumerate(self.graph.plots): if next((pp for pp in p.plots.values() if obj.component == pp[0]), 
None): axp = axps[i] if hasattr(new, '__iter__'): new = [float(ni) for ni in new] else: new = float(new) axp.set_overlay_position(obj.id, new) break def _analysis_group_hook(self, ag): pass # =============================================================================== # property get/set # =============================================================================== @cached_property def _get_sorted_analyses(self): return sorted(self.analyses, key=self._cmp_analyses, reverse=self._reverse_sorted_analyses) @cached_property def _get_analysis_group(self): ag = self._analysis_group if ag is None: ag = self._analysis_group_klass(group_id=self.group_id, analyses=self.sorted_analyses, omit_by_tag=self.options.omit_by_tag) self._analysis_group_hook(ag) return ag def _set_analysis_group(self, v): self._analysis_group = v # ============= EOF =============================================<|fim▁end|>
ap.ylimits = ylimits break
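The n-sigma pass in _get_aux_plot_filtered above masks out the indices the string filter already caught, then flags any remaining value that sits too far from the mean of the unmasked data. A minimal numpy sketch of that idea — sigma_filter here is an illustrative stand-in, not pychron's actual helper:

import numpy.ma as ma

def sigma_filter(vs, nsigma):
    # vs is a masked array; mean/std ignore the masked (pre-filtered) entries
    mean, std = vs.mean(), vs.std()
    # report the plain indices of unmasked values outside the nsigma band
    return [i for i in range(len(vs))
            if not vs.mask[i] and abs(vs.data[i] - mean) > nsigma * std]

vs = ma.array([10.0, 10.0, 10.0, 30.0, 10.0], mask=False)
vs.mask[0] = True             # pretend the string filter flagged index 0
print(sigma_filter(vs, 1.5))  # [3]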
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #<|fim▁hole|># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ._query import query<|fim▁end|>
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS,
<|file_name|>class_demos.py<|end_file_name|><|fim▁begin|># Notes on classes class Sample(): def __init__(self, name, number): self.name = name self.number = number def print_values(self): print(f"name: {self.name}") print(f"number: {self.number}") class SampleWithProperties(): def __init__(self, name, number): self.name = name self.number = number @property def name(self): # double underscore is to tell future devs to avoid variable return self.__name @property def double_name(self): # Can return calculated or other values besides fields return 2 * self.__name @property def number(self): return self.__number @name.setter def name(self, value): # Often has some sort of validation or transformation code self.__name = value @number.setter def number(self, value): # Often has some sort of validation or transformation code self.__number = value % 2 class SuperClass(): def __init__(self, name): self.name = name def speak(self): print(f"Hey, ho {self.name}") class SubClass(SuperClass): def __init__(self, name, location): super().__init__(name) self.location = location def shout_out(self): print(f"{self.location} is where it's at") def speak(self): # Need to explicitly over ride parent methods # calling it here, eg, super().speak() # just calls it. If super.method() is not # called, then only this code would run print(f"{self.location}, let's go! ") if __name__ == "__main__": <|fim▁hole|> print(f"Access name field directly: {instance.name}") instance.number += 100 print(f"Access number field directly: {instance.number}") ''' ''' # Demo SampleWithProperties() instance_with_props = SampleWithProperties("fred", 3) # Directly accessing values # Next line fails # print(f"Access name field, direct: {instance_with_props.__name}") # Python rewrites value names with intial __ to protect namespace # not really a private value, but less likely to be accessed print(f"Access name field, direct: {instance_with_props._SampleWithProperties__name}") # Using getter to access values, looks like direct access but isn't # name field print(f"Access name field, getter: {instance_with_props.name}") print(f"Access name field, getter: {instance_with_props.double_name}") instance_with_props.name = "Barney" print(f"Access name field, after setter: {instance_with_props.name}") # number field print(f"Access number field, before setter: {instance_with_props.number}") instance_with_props.number = 4 print(f"Access number field, after setter: {instance_with_props.number}") instance_with_props.number = 3 print(f"Access number field, after setter: {instance_with_props.number}") ''' # Demo inheritance # Show super class functions instance_super = SuperClass("Johnny") print(f"Name, super: {instance_super.name}") print("") # Show sub inherits name, methods instance_sub = SubClass("Joey", "Lower East Side") print(f"Name, super: {instance_sub.name}") print(f"Method from super: ", end="") instance_sub.super().speak() print("") # Show sub can override parent print(f"Overide from super: ", end="") instance_sub.speak() # Figure out how to call the super method from the instance rather than from the class definition<|fim▁end|>
''' # Demo Sample() instance = Sample("fred", 3) instance.print_values()
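The name-mangling that class_demos.py pokes at (_SampleWithProperties__name) is standard Python behaviour for double-underscore attributes; a self-contained illustration of the same getter/setter/mangling mechanics, using a made-up Celsius class:

class Celsius:
    def __init__(self, degrees):
        self.degrees = degrees        # routed through the setter below

    @property
    def degrees(self):
        return self.__degrees

    @degrees.setter
    def degrees(self, value):
        # the usual home for validation or transformation
        self.__degrees = float(value)

c = Celsius(21)
print(c.degrees)            # 21.0, via the getter
print(c._Celsius__degrees)  # 21.0, the mangled "private" name
# print(c.__degrees)        # would raise AttributeError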
<|file_name|>handler_utils.go<|end_file_name|><|fim▁begin|>package monitor import ( "context" "fmt" "strings" "time" v32 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" "github.com/rancher/norman/httperror" "github.com/rancher/norman/types" "github.com/rancher/rancher/pkg/controllers/managementagent/workload" v3 "github.com/rancher/rancher/pkg/generated/norman/management.cattle.io/v3" "github.com/rancher/rancher/pkg/ref" "github.com/rancher/rancher/pkg/types/config" "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( prometheusReqTimeout = 30 * time.Second ) var ( defaultQueryDuring = "5m" defaultTo = "now" defaultFrom = "now-" + defaultQueryDuring ) func newClusterGraphInputParser(input v32.QueryGraphInput) *clusterGraphInputParser { return &clusterGraphInputParser{ Input: &input, } } type clusterGraphInputParser struct { Input *v32.QueryGraphInput ClusterName string Start time.Time End time.Time Step time.Duration Conditions []*types.QueryCondition } func (p *clusterGraphInputParser) parse() (err error) { if p.Input.MetricParams == nil { p.Input.MetricParams = make(map[string]string) } p.Start, p.End, p.Step, err = parseTimeParams(p.Input.From, p.Input.To, p.Input.Interval) if err != nil { return err } return p.parseFilter() } func (p *clusterGraphInputParser) parseFilter() error { if p.Input.Filters == nil { return fmt.Errorf("must have clusterId filter") } p.ClusterName = p.Input.Filters["clusterId"] if p.ClusterName == "" { return fmt.Errorf("clusterId is empty") } for name, value := range p.Input.Filters { p.Conditions = append(p.Conditions, types.NewConditionFromString(name, types.ModifierEQ, value)) } return nil } func newProjectGraphInputParser(input v32.QueryGraphInput) *projectGraphInputParser { return &projectGraphInputParser{ Input: &input, } } type projectGraphInputParser struct { Input *v32.QueryGraphInput ProjectID string ClusterName string Start time.Time End time.Time Step time.Duration Conditions []*types.QueryCondition } func (p *projectGraphInputParser) parse() (err error) { if p.Input.MetricParams == nil { p.Input.MetricParams = make(map[string]string) } p.Start, p.End, p.Step, err = parseTimeParams(p.Input.From, p.Input.To, p.Input.Interval) if err != nil { return err } return p.parseFilter() } func (p *projectGraphInputParser) parseFilter() error { if p.Input.Filters == nil { return fmt.Errorf("must have projectId filter") } p.ProjectID = p.Input.Filters["projectId"] if p.ProjectID == "" { return fmt.Errorf("projectId is empty") } if p.ClusterName, _ = ref.Parse(p.ProjectID); p.ClusterName == "" { return fmt.Errorf("clusterName is empty") } for name, value := range p.Input.Filters { p.Conditions = append(p.Conditions, types.NewConditionFromString(name, types.ModifierEQ, value)) } return nil } type authChecker struct { ProjectID string Input *v32.QueryGraphInput UserContext *config.UserContext } func newAuthChecker(ctx context.Context, userContext *config.UserContext, input *v32.QueryGraphInput, projectID string) *authChecker { return &authChecker{ ProjectID: projectID, Input: input, UserContext: userContext, } } func (a *authChecker) check() error { return a.parseNamespace() } func (a *authChecker) parseNamespace() error { if a.Input.MetricParams["namespace"] != "" { if !a.isAuthorizeNamespace() { return fmt.Errorf("could not query unauthorize namespace") } return nil } nss, err := a.getAuthroizeNamespace() if err != nil { return err } a.Input.MetricParams["namespace"] = nss return nil } func (a *authChecker) 
isAuthorizeNamespace() bool { ns, err := a.UserContext.Core.Namespaces(metav1.NamespaceAll).Get(a.Input.MetricParams["namespace"], metav1.GetOptions{}) if err != nil { logrus.Errorf("get namespace %s info failed, %v", a.Input.MetricParams["namespace"], err) return false } return ns.Annotations[projectIDAnn] == a.ProjectID } func (a *authChecker) getAuthroizeNamespace() (string, error) { nss, err := a.UserContext.Core.Namespaces(metav1.NamespaceAll).List(metav1.ListOptions{}) if err != nil { return "", fmt.Errorf("list namespace failed, %v", err) } var authNs []string for _, v := range nss.Items { if v.Annotations[projectIDAnn] == a.ProjectID { authNs = append(authNs, v.Name) } } return strings.Join(authNs, "|"), nil } func getAuthToken(userContext *config.UserContext, appName, namespace string) (string, error) { sa, err := userContext.Core.ServiceAccounts(namespace).Get(appName, metav1.GetOptions{}) if err != nil { return "", fmt.Errorf("get service account %s:%s for monitor failed, %v", namespace, appName, err) } var secretName string if secretName = sa.Secrets[0].Name; secretName == "" { return "", fmt.Errorf("get secret from service account %s:%s for monitor failed, secret name is empty", namespace, appName) } secret, err := userContext.Core.Secrets(namespace).Get(secretName, metav1.GetOptions{}) if err != nil { return "", fmt.Errorf("get secret %s:%s for monitor failed, %v", namespace, secretName, err) } return string(secret.Data["token"]), nil } func parseMetricParams(userContext *config.UserContext, nodeLister v3.NodeLister, resourceType, clusterName, projectName string, metricParams map[string]string) (map[string]string, error) { newMetricParams := make(map[string]string) for k, v := range metricParams { newMetricParams[k] = v } var ip string var err error switch resourceType { case ResourceNode: instance := newMetricParams["instance"] if instance == "" { return nil, fmt.Errorf("instance in metric params is empty") } ip, err = nodeName2InternalIP(nodeLister, clusterName, instance) if err != nil { return newMetricParams, err } case ResourceWorkload: workloadName := newMetricParams["workloadName"] rcType, ns, name, err := parseWorkloadName(workloadName) if err != nil { return newMetricParams, err } if !validateNS(newMetricParams, ns) { return nil, httperror.NewAPIError(httperror.PermissionDenied, fmt.Sprintf("can't access namespace %s from project %s", ns, projectName)) } var podOwners []string if workloadName != "" { if rcType == workload.ReplicaSetType || rcType == workload.ReplicationControllerType || rcType == workload.DaemonSetType || rcType == workload.StatefulSetType || rcType == workload.JobType || rcType == workload.CronJobType { podOwners = []string{name} } if rcType == workload.DeploymentType { rcs, err := userContext.Apps.ReplicaSets(ns).List(metav1.ListOptions{}) if err != nil { return newMetricParams, fmt.Errorf("list replicasets failed, %v", err) } for _, rc := range rcs.Items { if len(rc.OwnerReferences) != 0 && strings.ToLower(rc.OwnerReferences[0].Kind) == workload.DeploymentType && rc.OwnerReferences[0].Name == name { podOwners = append(podOwners, rc.Name) } } rcType = workload.ReplicaSetType } var podNames []string pods, err := userContext.Core.Pods(ns).List(metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("list pod failed, %v", err) } for _, pod := range pods.Items { if len(pod.OwnerReferences) != 0 { podRefName := pod.OwnerReferences[0].Name podRefKind := pod.OwnerReferences[0].Kind if contains(podRefName, podOwners...) 
&& strings.ToLower(podRefKind) == rcType { podNames = append(podNames, pod.Name) } } } newMetricParams["podName"] = strings.Join(podNames, "|") } case ResourcePod: podName := newMetricParams["podName"] if podName == "" { return nil, fmt.Errorf("pod name is empty") } ns, name := ref.Parse(podName) if !validateNS(newMetricParams, ns) { return nil, httperror.NewAPIError(httperror.PermissionDenied, fmt.Sprintf("can't access namespace %s from project %s", ns, projectName)) } newMetricParams["namespace"] = ns newMetricParams["podName"] = name case ResourceContainer: podName := newMetricParams["podName"] if podName == "" { return nil, fmt.Errorf("pod name is empty") } ns, name := ref.Parse(podName) if !validateNS(newMetricParams, ns) { return nil, httperror.NewAPIError(httperror.PermissionDenied, fmt.Sprintf("can't access namespace %s from project %s", ns, projectName)) } newMetricParams["namespace"] = ns newMetricParams["podName"] = name containerName := newMetricParams["containerName"] if containerName == "" { return nil, fmt.Errorf("container name is empty") } } newMetricParams["instance"] = ip + ".*" return newMetricParams, nil } func replaceParams(metricParams map[string]string, expr string) string { var replacer []string for k, v := range metricParams { replacer = append(replacer, "$"+k) replacer = append(replacer, v) } srp := strings.NewReplacer(replacer...) return srp.Replace(expr) } func parseTimeParams(from, to, interval string) (start, end time.Time, step time.Duration, err error) { if from == "" { from = defaultFrom } if to == "" { to = defaultTo } timeRange := NewTimeRange(from, to) start, err = timeRange.ParseFrom() if err != nil { err = fmt.Errorf("parse param from value %s failed, %v", from, err) return } end, err = timeRange.ParseTo() if err != nil { err = fmt.Errorf("parse param to value %s failed, %v", to, err) return } i, err := getIntervalFrom(interval, defaultMinInterval) if err != nil { err = fmt.Errorf("parse param interval value %s failed, %v", i, err) return } intervalCalculator := newIntervalCalculator(&IntervalOptions{MinInterval: i}) calInterval := intervalCalculator.Calculate(timeRange, i) step = time.Duration(int64(calInterval.Value))<|fim▁hole|> return } func parseWorkloadName(id string) (typeName, namespace, name string, err error) { arr := strings.Split(id, ":") if len(arr) < 3 { return "", "", "", fmt.Errorf("invalid workload name: %s", id) } return arr[0], arr[1], arr[2], nil } func contains(str string, arr ...string) bool { for _, v := range arr { if v == str { return true } } return false } func isInstanceGraph(graphType string) bool { return graphType == "singlestat" } func validateNS(params map[string]string, ns string) bool { value, ok := params["namespace"] if !ok { return false } nss := strings.Split(value, "|") for _, v := range nss { if v == ns { return true } } return false }<|fim▁end|>
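parseTimeParams above resolves relative range expressions such as "now-5m" into concrete timestamps before querying. A rough Python sketch of that resolution — only a handful of units, whereas the real TimeRange parser accepts more:

from datetime import datetime, timedelta

UNITS = {"s": "seconds", "m": "minutes", "h": "hours", "d": "days"}

def parse_relative(expr, now):
    if expr == "now":
        return now
    if expr.startswith("now-"):
        amount, unit = expr[4:-1], expr[-1]   # "now-5m" -> ("5", "m")
        return now - timedelta(**{UNITS[unit]: int(amount)})
    raise ValueError("unsupported expression: " + expr)

now = datetime(2020, 1, 1, 12, 0, 0)
print(parse_relative("now-5m", now))  # 2020-01-01 11:55:00
print(parse_relative("now", now))     # 2020-01-01 12:00:00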
<|file_name|>Keyboard.js<|end_file_name|><|fim▁begin|>MD.Keyboard = function(){ const keys = { "v": { name: "Select tool", cb: ()=> state.set("canvasMode", "select") }, "q": { name: "Freehand tool", cb: ()=> state.set("canvasMode", "fhpath") }, "l": { name: "Line tool", cb: ()=> state.set("canvasMode", "line")}, "r": { name: "Rectangle tool", cb: ()=> state.set("canvasMode", "rect")}, "o": { name: "Ellipse tool", cb: ()=> state.set("canvasMode", "ellipse")}, "s": { name: "Shape tool", cb: ()=> state.set("canvasMode", "shapelib")}, "p": { name: "Path tool", cb: ()=> state.set("canvasMode", "path")}, "t": { name: "Text tool", cb: ()=> state.set("canvasMode", "text")}, "z": { name: "Zoom tool", cb: ()=> state.set("canvasMode", "zoom")}, "e": { name: "Eyedropper tool", cb: ()=> state.set("canvasMode", "eyedropper")}, "x": { name: "Focus fill/stroke", cb: ()=> editor.focusPaint()}, "shift_x": { name: "Switch fill/stroke", cb: ()=> editor.switchPaint()}, "alt": { name: false, cb: ()=> $("#workarea").toggleClass("out", state.get("canvasMode") === "zoom" )}, "cmd_s": { name: "Save SVG File", cb: ()=> editor.save()}, "cmd_z": { name: "Undo", cb: ()=> editor.undo()}, "cmd_y": { name: "Redo", cb: ()=> editor.redo()}, "cmd_shift_z": { name: "Redo", cb: ()=> editor.redo()}, "cmd_c": { name: "Copy", cb: ()=> editor.copySelected()}, "cmd_x": { name: "Cut", cb: ()=> editor.cutSelected()}, "cmd_v": { name: "Paste", cb: ()=> editor.pasteSelected()}, "cmd_d": { name: "Duplicate", cb: ()=> editor.duplicateSelected()}, "cmd_u": { name: "View source", cb: ()=> editor.source()}, "cmd_a": { name: "Select All", cb: ()=> svgCanvas.selectAllInCurrentLayer()}, "cmd_b": { name: "Set bold text", cb: ()=> editor.text.setBold()}, "cmd_i": { name: "Set italic text", cb: ()=> editor.text.setItalic()}, "cmd_g": { name: "Group selected", cb: ()=> editor.groupSelected()}, "cmd_shift_g": { name: "Ungroup", cb: ()=> editor.ungroupSelected()}, "cmd_o": { name: "Open SVG File", cb: ()=> editor.import.open()}, "cmd_k": { name: "Place image", cb: ()=> editor.import.place()}, "backspace": { name: "Delete", cb: ()=> editor.deleteSelected()}, "delete": { name: "Delete", cb: ()=> editor.deleteSelected()}, "ctrl_arrowleft": { name: "Rotate -1deg", cb: ()=> editor.rotateSelected(0,1)}, "ctrl_arrowright": { name: "Rotate +1deg", cb: ()=> editor.rotateSelected(1,1)}, "ctrl_shift_arrowleft": { name: "Rotate -5deg", cb: ()=> editor.rotateSelected(0,5)}, "ctrl_shift_arrowright": { name: "Rotate +5deg ", cb: ()=> editor.rotateSelected(1,5)}, "shift_o": { name: "Next item", cb: ()=> svgCanvas.cycleElement(0)}, "shift_p": { name: "Prev item", cb: ()=> svgCanvas.cycleElement(1)}, "shift_r": { name: "Show/hide rulers", cb: ()=> editor.rulers.toggleRulers()}, "cmd_+": { name: "Zoom in", cb: ()=> editor.zoom.multiply(1.5)}, "cmd_-": { name: "Zoom out", cb: ()=> editor.zoom.multiply(0.75)}, "cmd_=": { name: "Actual size", cb: ()=> editor.zoom.reset()}, "arrowleft": { name: "Nudge left", cb: ()=> editor.moveSelected(-1,0)}, "arrowright": { name: "Nudge right", cb: ()=> editor.moveSelected(1,0)}, "arrowup": { name: "Nudge up", cb: ()=> editor.moveSelected(0,-1)}, "arrowdown": { name: "Nudge down", cb: ()=> editor.moveSelected(0,1)}, "shift_arrowleft": {name: "Jump left", cb: () => editor.moveSelected(state.get("canvasSnapStep") * -1, 0)}, "shift_arrowright": {name: "Jump right", cb: () => editor.moveSelected(state.get("canvasSnapStep") * 1, 0)}, "shift_arrowup": {name: "Jump up", cb: () => editor.moveSelected(0, state.get("canvasSnapStep") *
-1)}, "shift_arrowdown": {name: "Jump down", cb: () => editor.moveSelected(0, state.get("canvasSnapStep") * 1)}, "cmd_arrowup":{ name: "Bring forward", cb: () => editor.moveUpSelected()}, "cmd_arrowdown":{ name: "Send backward", cb: () => editor.moveDownSelected()}, "cmd_shift_arrowup":{ name: "Bring to front", cb: () => editor.moveToTopSelected()}, "cmd_shift_arrowdown":{ name: "Send to back", cb: () => editor.moveToBottomSelected()}, "escape": { name: false, cb: ()=> editor.escapeMode()}, "enter": { name: false, cb: ()=> editor.escapeMode()}, " ": { name: "Pan canvas", cb: (e)=> editor.pan.startPan(e)}, }; document.addEventListener("keydown", function(e){ const exceptions = $(":focus").length || $("#color_picker").is(":visible"); if (exceptions) return false; const modKey = !svgedit.browser.isMac() ? "ctrlKey" : "metaKey"; const cmd = e[modKey] ? "cmd_" : ""; const shift = e.shiftKey ? "shift_" : ""; const key = cmd + shift + e.key.toLowerCase(); const canvasMode = state.get("canvasMode"); const modalIsOpen = Object.values(editor.modal).filter((modal) => { const isHidden = modal.el.classList.contains("hidden"); if (!isHidden && key === "cmd_enter") modal.confirm(); if (!isHidden && key === "escape") modal.close(); return !isHidden; }).length; // keyboard shortcut exists for app if (!modalIsOpen && keys[key]) { e.preventDefault(); keys[key].cb(); } }); document.addEventListener("keyup", function(e){ if ($("#color_picker").is(":visible")) return e; const canvasMode = state.get("canvasMode"); const key = e.key.toLowerCase(); const keys = { "alt": ()=> $("#workarea").removeClass("out"), " ": ()=> editor.pan.stopPan(), } if (keys[key]) { e.preventDefault(); keys[key](); } }) // modal shortcuts const shortcutEl = document.getElementById("shortcuts"); const docFrag = document.createDocumentFragment(); for (const key in keys) { const name = keys[key].name; if (!name) continue; const shortcut = document.createElement("div"); shortcut.classList.add("shortcut") const chords = key.split("_"); const shortcutKeys = document.createElement("div"); shortcutKeys.classList.add("shortcut-keys") chords.forEach(key => { const shortcutKey = document.createElement("div"); shortcutKey.classList.add("shortcut-key"); if (key === "arrowright") key = "→"; if (key === "arrowleft") key = "←"; if (key === "arrowup") key = "↑"; if (key === "arrowdown") key = "↓"; if (key === " ") key = "SPACEBAR"; if (key === "shift") key = "⇧";<|fim▁hole|> shortcutKey.textContent = key; shortcutKeys.appendChild(shortcutKey); shortcut.appendChild(shortcutKeys); }); const shortcutName = document.createElement("div"); shortcutName.classList.add("shortcut-name"); shortcutName.textContent = name; shortcutKeys.appendChild(shortcutName); docFrag.appendChild(shortcutKeys); } shortcutEl.appendChild(docFrag); }<|fim▁end|>
if (key === "cmd") key = svgedit.browser.isMac() ? "⌘" : "Ctrl";
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-<|fim▁hole|>@author: cheng.li """ from PyFin.Math.Distributions.NormalDistribution import NormalDistribution from PyFin.Math.Distributions.NormalDistribution import CumulativeNormalDistribution from PyFin.Math.Distributions.NormalDistribution import InverseCumulativeNormal __all__ = ['NormalDistribution', 'CumulativeNormalDistribution', 'InverseCumulativeNormal']<|fim▁end|>
u""" Created on 2015-7-23
<|file_name|>math_query_sql.rs<|end_file_name|><|fim▁begin|>// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. #[macro_use] extern crate criterion; use criterion::Criterion; use std::sync::{Arc, Mutex}; use tokio::runtime::Runtime; extern crate arrow; extern crate datafusion; use arrow::{ array::{Float32Array, Float64Array}, datatypes::{DataType, Field, Schema}, record_batch::RecordBatch, }; use datafusion::error::Result; use datafusion::datasource::MemTable; use datafusion::execution::context::ExecutionContext; fn query(ctx: Arc<Mutex<ExecutionContext>>, sql: &str) { let rt = Runtime::new().unwrap(); // execute the query let df = ctx.lock().unwrap().sql(&sql).unwrap(); rt.block_on(df.collect()).unwrap(); } fn create_context( array_len: usize, batch_size: usize, ) -> Result<Arc<Mutex<ExecutionContext>>> { // define a schema. let schema = Arc::new(Schema::new(vec![ Field::new("f32", DataType::Float32, false), Field::new("f64", DataType::Float64, false), ])); // define data. let batches = (0..array_len / batch_size) .map(|i| { RecordBatch::try_new( schema.clone(), vec![ Arc::new(Float32Array::from(vec![i as f32; batch_size])), Arc::new(Float64Array::from(vec![i as f64; batch_size])), ], )<|fim▁hole|> let mut ctx = ExecutionContext::new(); // declare a table in memory. In spark API, this corresponds to createDataFrame(...). let provider = MemTable::new(schema, vec![batches])?; ctx.register_table("t", Box::new(provider)); Ok(Arc::new(Mutex::new(ctx))) } fn criterion_benchmark(c: &mut Criterion) { let array_len = 1048576; // 2^20 let batch_size = 512; // 2^9 let ctx = create_context(array_len, batch_size).unwrap(); c.bench_function("sqrt_20_9", |b| { b.iter(|| query(ctx.clone(), "SELECT sqrt(f32) FROM t")) }); let array_len = 1048576; // 2^20 let batch_size = 4096; // 2^12 let ctx = create_context(array_len, batch_size).unwrap(); c.bench_function("sqrt_20_12", |b| { b.iter(|| query(ctx.clone(), "SELECT sqrt(f32) FROM t")) }); let array_len = 4194304; // 2^22 let batch_size = 4096; // 2^12 let ctx = create_context(array_len, batch_size).unwrap(); c.bench_function("sqrt_22_12", |b| { b.iter(|| query(ctx.clone(), "SELECT sqrt(f32) FROM t")) }); let array_len = 4194304; // 2^22 let batch_size = 16384; // 2^14 let ctx = create_context(array_len, batch_size).unwrap(); c.bench_function("sqrt_22_14", |b| { b.iter(|| query(ctx.clone(), "SELECT sqrt(f32) FROM t")) }); } criterion_group!(benches, criterion_benchmark); criterion_main!(benches);<|fim▁end|>
.unwrap() }) .collect::<Vec<_>>();
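create_context above shards array_len rows into array_len / batch_size batches whose columns hold the batch index as f32/f64. The shape of that data, sketched in pure Python without Arrow:

def make_batches(array_len, batch_size):
    batches = []
    for i in range(array_len // batch_size):
        # each batch carries batch_size copies of its own index, as above
        batches.append({"f32": [float(i)] * batch_size,
                        "f64": [float(i)] * batch_size})
    return batches

batches = make_batches(1 << 20, 1 << 9)
print(len(batches))           # 2048 batches of 512 rows
print(batches[3]["f32"][:2])  # [3.0, 3.0]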
<|file_name|>AIGame.cpp<|end_file_name|><|fim▁begin|>/* Copyright(C) 2013 Danny Sok <[email protected]> This program is free software : you can redistribute it and / or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program.If not, see <http://www.gnu.org/licenses/>. */ // ---------------------------------------- // Filename: AIGame.cpp // Description: Represents the state of the game from the AI perspective // Author: Danny Sok // Date Created: 9/08/2013 // Date Last Modified: 27/09/2013 #include "AIGame.h" #include "..\game\Game.h" AIGame::~AIGame() { //destroy(); } void AIGame::init() { m_hands.reserve(4); m_deck.reserve(52); } void AIGame::init(std::vector< std::vector<Card> > hands, int playerPos, int passCount, bool isFreeReign) { m_hands = hands; m_originalPlayerPos = playerPos; m_playerPos = playerPos; m_passCount = passCount; m_playerCount = Game::getInstance().getPlayerCount(); m_isGuessMode = false; m_isFreeReign = isFreeReign; m_turnCount++; m_depthCount = 1; } void AIGame::init(std::vector<Card> deck, std::vector<Card> playerHand, int playerPos, int passCount, bool isFreeReign) { m_deck = deck; m_playerPos = playerPos; m_originalPlayerPos = playerPos; m_passCount = passCount; m_playerCount = Game::getInstance().getPlayerCount(); m_isGuessMode = true; m_isFreeReign = isFreeReign; m_depthCount = 1; m_turnCount++; m_playerHand.insert(m_playerHand.end(), playerHand.begin(), playerHand.end()); deal(); } void AIGame::initAddedHands(int playerPos, int passCount, int originalPlayerPos, bool guessMode, bool isFreeReign, int depth) { m_playerPos = playerPos; m_originalPlayerPos = originalPlayerPos; m_playerCount = Game::getInstance().getPlayerCount(); m_passCount = passCount % m_playerCount; m_isGuessMode = guessMode; //m_isGuessMode = guessMode; m_isFreeReign = isFreeReign; m_depthCount = depth+1; // This will be needed to ensure we don't use too much memory. Just say the game is over. m_turnCount++; } void AIGame::destroy() { m_hands.clear(); m_hands.shrink_to_fit(); m_deck.clear(); m_deck.shrink_to_fit(); } void AIGame::setGuessMode(bool b) { m_isGuessMode = b; } void AIGame::deal() { int size = 0; for (int i = 0; i < m_playerCount; i++) { if (i != m_playerPos) { std::vector<Card> hand; if (Game::getInstance().getPlayer(i)->lock()) { size = Game::getInstance().getPlayer(i)->getCards().size(); Game::getInstance().getPlayer(i)->unlock(); } for (int j = 0; j < size; j++) { m_deck[j].clearImgs(); hand.push_back(m_deck[j]); } m_hands.push_back(sortCards(hand)); } else { //m_hands[m_playerPos].insert(m_hands[m_playerPos].end(), m_playerHand.begin(), m_playerHand.end()); m_hands.push_back(m_playerHand); } } } void AIGame::addHand(std::vector<Card> hand) { m_hands.push_back(hand); } int AIGame::getPlayerCount() { return m_playerCount; } int AIGame::getPassCount() { return m_passCount; } std::vector<Card> AIGame::getHand(int pos) { return m_hands[pos]; } bool AIGame::isOver() { bool b = false; if (m_depthCount >= 2) return true; // Stop generating any children once we are 10 nodes deep // If it isn't guess mode (i.e. 
god mode) then search for a winner normally if (!m_isGuessMode) { if (isWinner()) return true; } // otherwise for guess mode, stop the search once it hits a free reign turn else { if (isWinner()) return true; else if (isFreeReign()) return true; } return b; } bool AIGame::isWinner(int playerNum) { return m_hands[playerNum].size() == 0; } bool AIGame::isWinner() { return m_hands[m_originalPlayerPos].size() == 0; } bool AIGame::isFreeReign(int playerNum) { return m_isFreeReign && m_originalPlayerPos == playerNum; } bool AIGame::isFreeReign() { return m_isFreeReign && m_originalPlayerPos == m_playerPos; } std::vector<Card> AIGame::sortCards(std::vector<Card> pHand) { auto hand = pHand; //std::sort(cards.begin(), cards.end(), cmp()); int swapped; int i; for (i = 1; i < hand.size(); i++) { swapped = 0; //this flag is to check if the array is already sorted int j; Card temp; for(j = 0; j < hand.size() - i; j++) { if (hand[j].getNumber() > hand[j + 1].getNumber()) { if (! (hand[j + 1].getNumber() == Ace || hand[j + 1].getNumber() == Two) ) { temp = hand[j]; hand[j] = hand[j+1]; hand[j+1] = temp; swapped = 1; } } else if (hand[j].getNumber() < hand[j + 1].getNumber()) { if (hand[j].getNumber() == Ace || hand[j].getNumber() == Two) { temp = hand[j]; hand[j] = hand[j+1]; hand[j+1] = temp; swapped = 1; } } else //if left.Number == right.Number { if (hand[j].getSuite() > hand[j + 1].getSuite()) { temp = hand[j]; hand[j] = hand[j+1]; hand[j+1] = temp; swapped = 1; } } if (hand[j].getNumber() == Two && hand[j + 1].getNumber() == Ace) { temp = hand[j]; hand[j] = hand[j+1]; hand[j+1] = temp; swapped = 1; } } if(!swapped){ break; //if it is sorted then stop } } return hand; }<|fim▁hole|>{ return m_originalPlayerPos; } int AIGame::getDepth() { return m_depthCount; }<|fim▁end|>
int AIGame::getOriginalPlayerPos()
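The hand-rolled bubble sort in AIGame::sortCards exists to rank Ace and Two above every other number, with suit breaking ties, as in Big Two. The same ordering falls out of a single key function; the rank list below is an assumption that matches that comparison logic, not taken from the game's headers:

RANK_ORDER = {r: i for i, r in enumerate(
    [3, 4, 5, 6, 7, 8, 9, 10, "J", "Q", "K", "A", 2])}

def card_key(card):
    rank, suit = card
    return (RANK_ORDER[rank], suit)   # rank first, suit as tie-breaker

hand = [(2, 1), ("A", 0), (3, 2), (10, 1), (3, 0)]
print(sorted(hand, key=card_key))
# [(3, 0), (3, 2), (10, 1), ('A', 0), (2, 1)]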
<|file_name|>flash.js<|end_file_name|><|fim▁begin|>import { timeout as d3_timeout } from 'd3-timer'; export function uiFlash(context) { var _flashTimer; var _duration = 2000; var _iconName = '#iD-icon-no'; var _iconClass = 'disabled'; var _text = ''; var _textClass; function flash() { if (_flashTimer) { _flashTimer.stop(); } context.container().select('.main-footer-wrap') .classed('footer-hide', true) .classed('footer-show', false); context.container().select('.flash-wrap') .classed('footer-hide', false) .classed('footer-show', true); var content = context.container().select('.flash-wrap').selectAll('.flash-content') .data([0]); // Enter var contentEnter = content.enter() .append('div') .attr('class', 'flash-content'); var iconEnter = contentEnter .append('svg') .attr('class', 'flash-icon') .append('g') .attr('transform', 'translate(10,10)'); iconEnter .append('circle') .attr('r', 9); iconEnter .append('use') .attr('transform', 'translate(-7,-7)') .attr('width', '14') .attr('height', '14'); contentEnter .append('div') .attr('class', 'flash-text'); // Update content = content .merge(contentEnter); content .selectAll('.flash-icon') .attr('class', 'flash-icon ' + (_iconClass || '')); content .selectAll('.flash-icon use') .attr('xlink:href', _iconName); content .selectAll('.flash-text') .attr('class', 'flash-text ' + (_textClass || '')) .text(_text); _flashTimer = d3_timeout(function() { _flashTimer = null; context.container().select('.main-footer-wrap') .classed('footer-hide', false) .classed('footer-show', true); context.container().select('.flash-wrap') .classed('footer-hide', true) .classed('footer-show', false); }, _duration); return content; } flash.duration = function(_) { if (!arguments.length) return _duration; _duration = _; return flash; }; flash.text = function(_) { if (!arguments.length) return _text; _text = _; return flash; }; flash.textClass = function(_) { if (!arguments.length) return _textClass; _textClass = _; return flash; };<|fim▁hole|> _iconName = _; return flash; }; flash.iconClass = function(_) { if (!arguments.length) return _iconClass; _iconClass = _; return flash; }; return flash; }<|fim▁end|>
flash.iconName = function(_) { if (!arguments.length) return _iconName;
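flash.duration and its siblings follow the d3 convention: call with no argument to read the value, with an argument to write it and get the object back for chaining. The pattern translated to Python with a sentinel default:

_UNSET = object()

class Flash:
    def __init__(self):
        self._duration = 2000

    def duration(self, value=_UNSET):
        if value is _UNSET:
            return self._duration  # getter
        self._duration = value     # setter...
        return self                # ...that chains

f = Flash()
print(f.duration())                # 2000
print(f.duration(500).duration())  # 500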
<|file_name|>rte.controller.js<|end_file_name|><|fim▁begin|>angular.module("umbraco") .controller("Umbraco.PropertyEditors.RTEController", function ($rootScope, $scope, $q, $locale, dialogService, $log, imageHelper, assetsService, $timeout, tinyMceService, angularHelper, stylesheetResource, macroService, editorState) { $scope.isLoading = true; //To id the html textarea we need to use the datetime ticks because we can have multiple rte's per a single property alias // because now we have to support having 2x (maybe more at some stage) content editors being displayed at once. This is because // we have this mini content editor panel that can be launched with MNTP. var d = new Date(); var n = d.getTime(); $scope.textAreaHtmlId = $scope.model.alias + "_" + n + "_rte"; function syncContent(editor){ editor.save(); angularHelper.safeApply($scope, function () { $scope.model.value = editor.getContent(); }); //make the form dirty manually so that the track changes works, setting our model doesn't trigger // the angular bits because tinymce replaces the textarea. angularHelper.getCurrentForm($scope).$setDirty(); } tinyMceService.configuration().then(function (tinyMceConfig) { //config value from general tinymce.config file var validElements = tinyMceConfig.validElements; //These are absolutely required in order for the macros to render inline //we put these as extended elements because they get merged on top of the normal allowed elements by tiny mce var extendedValidElements = "@[id|class|style],-div[id|dir|class|align|style],ins[datetime|cite],-ul[class|style],-li[class|style],span[id|class|style]"; var invalidElements = tinyMceConfig.inValidElements; var plugins = _.map(tinyMceConfig.plugins, function (plugin) { if (plugin.useOnFrontend) { return plugin.name; } }).join(" "); var editorConfig = $scope.model.config.editor; if (!editorConfig || angular.isString(editorConfig)) { editorConfig = tinyMceService.defaultPrevalues(); } //config value on the data type var toolbar = editorConfig.toolbar.join(" | "); var stylesheets = []; var styleFormats = []; var await = []; if (!editorConfig.maxImageSize && editorConfig.maxImageSize != 0) { editorConfig.maxImageSize = tinyMceService.defaultPrevalues().maxImageSize; } //queue file loading if (typeof tinymce === "undefined") { // Don't reload tinymce if already loaded await.push(assetsService.loadJs("lib/tinymce/tinymce.min.js", $scope)); } //queue rules loading angular.forEach(editorConfig.stylesheets, function (val, key) { stylesheets.push(Umbraco.Sys.ServerVariables.umbracoSettings.cssPath + "/" + val + ".css?" + new Date().getTime()); await.push(stylesheetResource.getRulesByName(val).then(function (rules) { angular.forEach(rules, function (rule) { var r = {}; r.title = rule.name; if (rule.selector[0] == ".") { r.inline = "span"; r.classes = rule.selector.substring(1); } else if (rule.selector[0] == "#") { r.inline = "span"; r.attributes = { id: rule.selector.substring(1) }; } else if (rule.selector[0] != "." 
&& rule.selector.indexOf(".") > -1) { var split = rule.selector.split("."); r.block = split[0]; r.classes = rule.selector.substring(rule.selector.indexOf(".") + 1).replace(".", " "); } else if (rule.selector[0] != "#" && rule.selector.indexOf("#") > -1) { var split = rule.selector.split("#"); r.block = split[0]; r.classes = rule.selector.substring(rule.selector.indexOf("#") + 1); } else { r.block = rule.selector; } styleFormats.push(r); }); })); }); //stores a reference to the editor var tinyMceEditor = null; // these languages are available for localization var availableLanguages = [ 'da', 'de', 'en', 'en_us', 'fi', 'fr', 'he', 'it', 'ja', 'nl', 'no', 'pl', 'pt', 'ru', 'sv', 'zh' ]; //define fallback language var language = 'en_us'; //get locale from angular and match tinymce format. Angular localization is always in the format of ru-ru, de-de, en-gb, etc. //wheras tinymce is in the format of ru, de, en, en_us, etc. var localeId = $locale.id.replace('-', '_'); //try matching the language using full locale format var languageMatch = _.find(availableLanguages, function(o) { return o === localeId; }); //if no matches, try matching using only the language if (languageMatch === undefined) { var localeParts = localeId.split('_'); languageMatch = _.find(availableLanguages, function(o) { return o === localeParts[0]; }); } //if a match was found - set the language if (languageMatch !== undefined) { language = languageMatch; } //wait for queue to end $q.all(await).then(function () { //create a baseline Config to exten upon var baseLineConfigObj = { mode: "exact", skin: "umbraco", plugins: plugins, valid_elements: validElements, invalid_elements: invalidElements, extended_valid_elements: extendedValidElements, menubar: false, statusbar: false, relative_urls: false, height: editorConfig.dimensions.height, width: editorConfig.dimensions.width, maxImageSize: editorConfig.maxImageSize, toolbar: toolbar, content_css: stylesheets, style_formats: styleFormats, language: language, //see http://archive.tinymce.com/wiki.php/Configuration:cache_suffix cache_suffix: "?umb__rnd=" + Umbraco.Sys.ServerVariables.application.cacheBuster }; if (tinyMceConfig.customConfig) { //if there is some custom config, we need to see if the string value of each item might actually be json and if so, we need to // convert it to json instead of having it as a string since this is what tinymce requires for (var i in tinyMceConfig.customConfig) { var val = tinyMceConfig.customConfig[i]; if (val) { val = val.toString().trim(); if (val.detectIsJson()) { try { tinyMceConfig.customConfig[i] = JSON.parse(val); //now we need to check if this custom config key is defined in our baseline, if it is we don't want to //overwrite the baseline config item if it is an array, we want to concat the items in the array, otherwise //if it's an object it will overwrite the baseline if (angular.isArray(baseLineConfigObj[i]) && angular.isArray(tinyMceConfig.customConfig[i])) { //concat it and below this concat'd array will overwrite the baseline in angular.extend tinyMceConfig.customConfig[i] = baseLineConfigObj[i].concat(tinyMceConfig.customConfig[i]); } } catch (e) { //cannot parse, we'll just leave it } } if (val === "true") { tinyMceConfig.customConfig[i] = true; } if (val === "false") { tinyMceConfig.customConfig[i] = false; } } } angular.extend(baseLineConfigObj, tinyMceConfig.customConfig); } //set all the things that user configs should not be able to override baseLineConfigObj.elements = $scope.textAreaHtmlId; //this is the exact textarea id to 
replace! baseLineConfigObj.setup = function (editor) { //set the reference tinyMceEditor = editor; //enable browser based spell checking editor.on('init', function (e) { editor.getBody().setAttribute('spellcheck', true); }); //We need to listen on multiple things here because of the nature of tinymce, it doesn't //fire events when you think! //The change event doesn't fire when content changes, only when cursor points are changed and undo points //are created. the blur event doesn't fire if you insert content into the editor with a button and then //press save. //We have a couple of options, one is to do a set timeout and check for isDirty on the editor, or we can //listen to both change and blur and also on our own 'saving' event. I think this will be best because a //timer might end up using unwanted cpu and we'd still have to listen to our saving event in case they clicked //save before the timeout elapsed. //TODO: We need to re-enable something like this to ensure the track changes is working with tinymce // so we can detect if the form is dirty or not, Per has some better events to use as this one triggers // even if you just enter/exit with mouse cursuor which doesn't really mean it's changed. // see: http://issues.umbraco.org/issue/U4-4485 //var alreadyDirty = false; //editor.on('change', function (e) { // angularHelper.safeApply($scope, function () { // $scope.model.value = editor.getContent(); // if (!alreadyDirty) { // //make the form dirty manually so that the track changes works, setting our model doesn't trigger // // the angular bits because tinymce replaces the textarea. // var currForm = angularHelper.getCurrentForm($scope); // currForm.$setDirty(); // alreadyDirty = true; // } // }); //}); //when we leave the editor (maybe) editor.on('blur', function (e) { editor.save(); angularHelper.safeApply($scope, function () { $scope.model.value = editor.getContent(); }); }); //when buttons modify content editor.on('ExecCommand', function (e) { syncContent(editor); }); // Update model on keypress editor.on('KeyUp', function (e) { syncContent(editor); }); // Update model on change, i.e. copy/pasted text, plugins altering content editor.on('SetContent', function (e) { if (!e.initial) { syncContent(editor); } }); editor.on('ObjectResized', function (e) { var qs = "?width=" + e.width + "&height=" + e.height + "&mode=max"; var srcAttr = $(e.target).attr("src"); var path = srcAttr.split("?")[0]; $(e.target).attr("data-mce-src", path + qs); syncContent(editor); }); tinyMceService.createLinkPicker(editor, $scope, function(currentTarget, anchorElement) { $scope.linkPickerOverlay = { view: "linkpicker", currentTarget: currentTarget, anchors: editorState.current ? tinyMceService.getAnchorNames(JSON.stringify(editorState.current.properties)) : [], ignoreUserStartNodes: $scope.model.config.ignoreUserStartNodes === "1", <|fim▁hole|> $scope.linkPickerOverlay = null; } }; }); //Create the insert media plugin tinyMceService.createMediaPicker(editor, $scope, function(currentTarget, userData){ var ignoreUserStartNodes = false; var startNodeId = userData.startMediaIds.length !== 1 ? 
-1 : userData.startMediaIds[0]; var startNodeIsVirtual = userData.startMediaIds.length !== 1; if ($scope.model.config.ignoreUserStartNodes === "1") { ignoreUserStartNodes = true; startNodeId = -1; startNodeIsVirtual = true; } $scope.mediaPickerOverlay = { currentTarget: currentTarget, onlyImages: true, showDetails: true, disableFolderSelect: true, startNodeId: startNodeId, startNodeIsVirtual: startNodeIsVirtual, ignoreUserStartNodes: ignoreUserStartNodes, view: "mediapicker", show: true, submit: function(model) { tinyMceService.insertMediaInEditor(editor, model.selectedImages[0]); $scope.mediaPickerOverlay.show = false; $scope.mediaPickerOverlay = null; } }; }); //Create the embedded plugin tinyMceService.createInsertEmbeddedMedia(editor, $scope, function() { $scope.embedOverlay = { view: "embed", show: true, submit: function(model) { tinyMceService.insertEmbeddedMediaInEditor(editor, model.embed.preview); $scope.embedOverlay.show = false; $scope.embedOverlay = null; } }; }); //Create the insert macro plugin tinyMceService.createInsertMacro(editor, $scope, function(dialogData) { $scope.macroPickerOverlay = { view: "macropicker", dialogData: dialogData, show: true, submit: function(model) { var macroObject = macroService.collectValueData(model.selectedMacro, model.macroParams, dialogData.renderingEngine); tinyMceService.insertMacroInEditor(editor, macroObject, $scope); $scope.macroPickerOverlay.show = false; $scope.macroPickerOverlay = null; } }; }); }; /** Loads in the editor */ function loadTinyMce() { //we need to add a timeout here, to force a redraw so TinyMCE can find //the elements needed $timeout(function () { tinymce.DOM.events.domLoaded = true; tinymce.init(baseLineConfigObj); $scope.isLoading = false; }, 200, false); } loadTinyMce(); //here we declare a special method which will be called whenever the value has changed from the server //this is instead of doing a watch on the model.value = faster $scope.model.onValueChanged = function (newVal, oldVal) { //update the display val again if it has changed from the server; //uses an empty string in the editor when the value is null tinyMceEditor.setContent(newVal || "", { format: 'raw' }); //we need to manually fire this event since it is only ever fired based on loading from the DOM, this // is required for our plugins listening to this event to execute tinyMceEditor.fire('LoadContent', null); }; //listen for formSubmitting event (the result is callback used to remove the event subscription) var unsubscribe = $scope.$on("formSubmitting", function () { //TODO: Here we should parse out the macro rendered content so we can save on a lot of bytes in data xfer // we do parse it out on the server side but would be nice to do that on the client side before as well. if (tinyMceEditor !== undefined && tinyMceEditor != null && !$scope.isLoading) { $scope.model.value = tinyMceEditor.getContent(); } }); //when the element is disposed we need to unsubscribe! // NOTE: this is very important otherwise if this is part of a modal, the listener still exists because the dom // element might still be there even after the modal has been hidden. $scope.$on('$destroy', function () { unsubscribe(); if (tinyMceEditor !== undefined && tinyMceEditor != null) { tinyMceEditor.destroy(); } }); }); }); });<|fim▁end|>
show: true, submit: function(model) { tinyMceService.insertLinkInEditor(editor, model.target, anchorElement); $scope.linkPickerOverlay.show = false;
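The controller above funnels blur, ExecCommand, KeyUp and SetContent through the one syncContent so the model and the dirty flag stay consistent no matter which event fires first. A stripped-down sketch of that single-funnel idea:

class EditorSync:
    def __init__(self):
        self.model_value = ""
        self.dirty = False

    def sync(self, editor_content):
        # every event path calls this one function
        if editor_content != self.model_value:
            self.model_value = editor_content
            self.dirty = True  # the form goes dirty at most once per change

sync = EditorSync()
for event, content in [("KeyUp", "a"), ("SetContent", "ab"), ("blur", "ab")]:
    sync.sync(content)
print(sync.model_value, sync.dirty)  # ab True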
<|file_name|>borrowck-mut-vec-as-imm-slice.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // run-pass fn want_slice(v: &[isize]) -> isize { let mut sum = 0; for i in v { sum += *i; } sum } fn has_mut_vec(v: Vec<isize>) -> isize { want_slice(&v) } <|fim▁hole|><|fim▁end|>
pub fn main() { assert_eq!(has_mut_vec(vec![1, 2, 3]), 6); }
<|file_name|>parseInput.js<|end_file_name|><|fim▁begin|>/** * Takes a newline-separated string of functional dependency lines (e.g. "A,B -> C") * and returns them as a DependencySet of FunctionalDependency objects. */ var parseInput = function(lines) {<|fim▁hole|> lines = lines.split('\n'); var functionalDependencies = new DependencySet(); for(var i = 0; i < lines.length; ++i) { var line = lines[i]; var arrowIndex = line.indexOf('->'); if(arrowIndex >= 0) { var lhs = line.substring(0, arrowIndex).trim().split(','); var rhs = line.substring(arrowIndex + 2, line.length).trim().split(','); /* Trim all the individual attributes */ for(var j=0;j<lhs.length;++j) lhs[j] = lhs[j].trim(); for(var k=0;k<rhs.length;++k) rhs[k] = rhs[k].trim(); /* Make sure they're nonzero and add them to the list */ if(lhs.length > 0 && rhs.length > 0) { var functionalDependency = new FunctionalDependency(lhs, rhs); functionalDependencies.add(functionalDependency); } } } return functionalDependencies; };<|fim▁end|>
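The arrow-and-comma split above is the entire grammar. The same parse in Python, with plain tuples standing in for DependencySet and FunctionalDependency:

def parse_input(text):
    deps = set()
    for line in text.split("\n"):
        if "->" not in line:
            continue
        lhs, rhs = line.split("->", 1)
        left = tuple(a.strip() for a in lhs.split(",") if a.strip())
        right = tuple(a.strip() for a in rhs.split(",") if a.strip())
        if left and right:        # mirror the nonzero check above
            deps.add((left, right))
    return deps

print(sorted(parse_input("A,B -> C\nC -> D")))
# [(('A', 'B'), ('C',)), (('C',), ('D',))]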
<|file_name|>delete_contents.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # a script to delete the contents of s3 buckets # import the sys and boto3 modules import sys import boto3 # create an s3 resource s3 = boto3.resource('s3') # iterate over the script arguments as bucket names for bucket_name in sys.argv[1:]: # use the bucket name to create a bucket object bucket = s3.Bucket(bucket_name) # delete the bucket's contents and print the response or error for key in bucket.objects.all():<|fim▁hole|>
<|file_name|>DimmerItem.java<|end_file_name|><|fim▁begin|>/** * Copyright (c) 2014,2019 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 */ package org.eclipse.smarthome.core.library.items; import java.util.ArrayList; import java.util.Collections; import java.util.List; import org.eclipse.jdt.annotation.NonNullByDefault; import org.eclipse.smarthome.core.library.CoreItemFactory; import org.eclipse.smarthome.core.library.types.IncreaseDecreaseType; import org.eclipse.smarthome.core.library.types.OnOffType; import org.eclipse.smarthome.core.library.types.PercentType; import org.eclipse.smarthome.core.types.Command; import org.eclipse.smarthome.core.types.RefreshType; import org.eclipse.smarthome.core.types.State; import org.eclipse.smarthome.core.types.UnDefType; /** * A DimmerItem can be used as a switch (ON/OFF), but it also accepts percent values * to reflect the dimmed state. * * @author Kai Kreuzer - Initial contribution and API * @author Markus Rathgeb - Support more types for getStateAs * */ @NonNullByDefault public class DimmerItem extends SwitchItem { private static List<Class<? extends State>> acceptedDataTypes = new ArrayList<Class<? extends State>>(); <|fim▁hole|> acceptedDataTypes.add(OnOffType.class); acceptedDataTypes.add(UnDefType.class); acceptedCommandTypes.add(PercentType.class); acceptedCommandTypes.add(OnOffType.class); acceptedCommandTypes.add(IncreaseDecreaseType.class); acceptedCommandTypes.add(RefreshType.class); } public DimmerItem(String name) { super(CoreItemFactory.DIMMER, name); } /* package */ DimmerItem(String type, String name) { super(type, name); } public void send(PercentType command) { internalSend(command); } @Override public List<Class<? extends State>> getAcceptedDataTypes() { return Collections.unmodifiableList(acceptedDataTypes); } @Override public List<Class<? extends Command>> getAcceptedCommandTypes() { return Collections.unmodifiableList(acceptedCommandTypes); } @Override public void setState(State state) { if (isAcceptedState(acceptedDataTypes, state)) { // try conversion State convertedState = state.as(PercentType.class); if (convertedState != null) { applyState(convertedState); } else { applyState(state); } } else { logSetTypeError(state); } } }<|fim▁end|>
private static List<Class<? extends Command>> acceptedCommandTypes = new ArrayList<Class<? extends Command>>(); static { acceptedDataTypes.add(PercentType.class);
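setState() above shows a coercion-first pattern: try to convert the incoming state to the item's preferred representation (PercentType) and fall back to applying the raw state when no conversion exists. A minimal sketch of the same idea, with hypothetical stand-in types rather than the Eclipse SmartHome ones:

// Hypothetical stand-ins for the framework's State hierarchy.
#[derive(Debug, Clone)]
enum State {
    OnOff(bool),
    Percent(u8),
    Undef,
}

impl State {
    // Mirrors `state.as(PercentType.class)`: view the state as a
    // percentage when a sensible conversion exists.
    fn as_percent(&self) -> Option<State> {
        match *self {
            State::Percent(p) => Some(State::Percent(p)),
            State::OnOff(on) => Some(State::Percent(if on { 100 } else { 0 })),
            State::Undef => None, // no percent form
        }
    }
}

fn apply_state(state: &State) {
    println!("applying {:?}", state);
}

// Prefer the converted form; otherwise apply the state as-is,
// just as DimmerItem.setState() does.
fn set_state(state: State) {
    match state.as_percent() {
        Some(converted) => apply_state(&converted),
        None => apply_state(&state),
    }
}

fn main() {
    set_state(State::OnOff(true)); // applied as Percent(100)
    set_state(State::Undef);       // applied unchanged
}

The Java version gets the fallback almost for free because applyState accepts any State; the sketch just makes the two paths explicit.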
<|file_name|>issue-70972-dyn-trait.rs<|end_file_name|><|fim▁begin|>const F: &'static dyn Send = &7u32; fn main() { let a: &dyn Send = &7u32;<|fim▁hole|> _ => {} } }<|fim▁end|>
match a { F => panic!(), //~^ ERROR `&dyn Send` cannot be used in patterns
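The compile-fail test above pins down a language rule: a const can appear in a match pattern only when its type has structural equality, which a `&'static dyn Send` trait object lacks. For contrast, a minimal sketch of the accepted case:

const N: u32 = 7;

fn main() {
    let a = 7u32;
    match a {
        // A const of a plain integer type is allowed in a pattern
        // because u32 has structural equality.
        N => println!("matched the const"),
        _ => println!("no match"),
    }
}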
<|file_name|>bench.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use cssparser::{Parser, SourcePosition}; use parking_lot::RwLock; use rayon; use servo_url::ServoUrl; use std::sync::Arc; use style::error_reporting::ParseErrorReporter; use style::media_queries::MediaList; use style::parser::ParserContextExtraData; use style::properties::{longhands, Importance, PropertyDeclaration, PropertyDeclarationBlock}; use style::rule_tree::{CascadeLevel, RuleTree, StrongRuleNode, StyleSource}; use style::stylesheets::{Origin, Stylesheet, CssRule}; use test::{self, Bencher}; struct ErrorringErrorReporter; impl ParseErrorReporter for ErrorringErrorReporter { fn report_error(&self, _input: &mut Parser, position: SourcePosition, message: &str, url: &ServoUrl) { panic!("CSS error: {}\t\n{:?} {}", url.as_str(), position, message); } } struct AutoGCRuleTree<'a>(&'a RuleTree); impl<'a> AutoGCRuleTree<'a> { fn new(r: &'a RuleTree) -> Self { AutoGCRuleTree(r) } } impl<'a> Drop for AutoGCRuleTree<'a> { fn drop(&mut self) { unsafe { self.0.gc() } } } fn parse_rules(css: &str) -> Vec<(StyleSource, CascadeLevel)> { let s = Stylesheet::from_str(css, ServoUrl::parse("http://localhost").unwrap(), Origin::Author, MediaList { media_queries: vec![], }, None, &ErrorringErrorReporter, ParserContextExtraData {}); let rules = s.rules.read(); rules.0.iter().filter_map(|rule| { match *rule { CssRule::Style(ref style_rule) => Some(style_rule), _ => None, } }).cloned().map(StyleSource::Style).map(|s| { (s, CascadeLevel::UserNormal) }).collect() } fn test_insertion(rule_tree: &RuleTree, rules: Vec<(StyleSource, CascadeLevel)>) -> StrongRuleNode { rule_tree.insert_ordered_rules(rules.into_iter()) } fn test_insertion_style_attribute(rule_tree: &RuleTree, rules: &[(StyleSource, CascadeLevel)]) -> StrongRuleNode { let mut rules = rules.to_vec(); rules.push((StyleSource::Declarations(Arc::new(RwLock::new(PropertyDeclarationBlock::with_one( PropertyDeclaration::Display( longhands::display::SpecifiedValue::block), Importance::Normal )))), CascadeLevel::UserNormal)); test_insertion(rule_tree, rules) } #[bench] fn bench_insertion_basic(b: &mut Bencher) { let r = RuleTree::new(); let rules_matched = parse_rules( ".foo { width: 200px; } \ .bar { height: 500px; } \ .baz { display: block; }"); b.iter(|| { let _gc = AutoGCRuleTree::new(&r); for _ in 0..(4000 + 400) { test::black_box(test_insertion(&r, rules_matched.clone())); } }) } #[bench] fn bench_insertion_basic_per_element(b: &mut Bencher) { let r = RuleTree::new();<|fim▁hole|> .bar { height: 500px; } \ .baz { display: block; }"); b.iter(|| { let _gc = AutoGCRuleTree::new(&r); test::black_box(test_insertion(&r, rules_matched.clone())); }); } #[bench] fn bench_expensive_insertion(b: &mut Bencher) { let r = RuleTree::new(); // This benchmark covers the case where you style a bunch of siblings // matching the same rules, each with a different style attribute. 
let rules_matched = parse_rules( ".foo { width: 200px; } \ .bar { height: 500px; } \ .baz { display: block; }"); b.iter(|| { let _gc = AutoGCRuleTree::new(&r); for _ in 0..(4000 + 400) { test::black_box(test_insertion_style_attribute(&r, &rules_matched)); } }); } #[bench] fn bench_insertion_basic_parallel(b: &mut Bencher) { let r = RuleTree::new(); let rules_matched = parse_rules( ".foo { width: 200px; } \ .bar { height: 500px; } \ .baz { display: block; }"); b.iter(|| { let _gc = AutoGCRuleTree::new(&r); rayon::scope(|s| { for _ in 0..4 { s.spawn(|s| { for _ in 0..1000 { test::black_box(test_insertion(&r, rules_matched.clone())); } s.spawn(|_| { for _ in 0..100 { test::black_box(test_insertion(&r, rules_matched.clone())); } }) }) } }); }); } #[bench] fn bench_expensive_insertion_parallel(b: &mut Bencher) { let r = RuleTree::new(); let rules_matched = parse_rules( ".foo { width: 200px; } \ .bar { height: 500px; } \ .baz { display: block; }"); b.iter(|| { let _gc = AutoGCRuleTree::new(&r); rayon::scope(|s| { for _ in 0..4 { s.spawn(|s| { for _ in 0..1000 { test::black_box(test_insertion_style_attribute(&r, &rules_matched)); } s.spawn(|_| { for _ in 0..100 { test::black_box(test_insertion_style_attribute(&r, &rules_matched)); } }) }) } }); }); }<|fim▁end|>
let rules_matched = parse_rules( ".foo { width: 200px; } \
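Each benchmark above wraps its body in an AutoGCRuleTree guard so a GC pass runs when the iteration's scope ends, even across panics. A standalone sketch of that RAII drop-guard pattern, with a hypothetical Tree type standing in for RuleTree:

struct Tree;

impl Tree {
    fn gc(&self) {
        println!("collecting unused rule nodes");
    }
}

// Borrow the tree for the guard's lifetime.
struct AutoGc<'a>(&'a Tree);

impl<'a> Drop for AutoGc<'a> {
    // Runs when the guard leaves scope, even if the guarded code panics.
    fn drop(&mut self) {
        self.0.gc();
    }
}

fn main() {
    let tree = Tree;
    {
        let _guard = AutoGc(&tree);
        // ... benchmark body would run here ...
    } // `drop` fires here, triggering the collection pass.
}

This guarantee is what lets the benchmarks skip explicit cleanup calls at every exit path.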
<|file_name|>VirtualHost.cpp<|end_file_name|><|fim▁begin|>/* * * (C) 2015 - ntop.org * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * */ #include "ntop_includes.h" /* ************************************************** */ VirtualHost::VirtualHost(HostHash *_h, char *_name) : GenericHashEntry(NULL) { h = _h, name = strdup(_name), last_num_requests = 0, last_diff = 0, trend = trend_stable; h->incNumHTTPEntries(); } /* ************************************************** */ VirtualHost::~VirtualHost() { h->decNumHTTPEntries(); if(name) free(name); } /* ************************************************** */ void VirtualHost::update_stats() { u_int32_t diff = (u_int32_t)(num_requests.getNumBytes() - last_num_requests); trend = (diff > last_diff) ? trend_up : ((diff < last_diff) ? trend_down : trend_stable);<|fim▁hole|> */ last_num_requests = num_requests.getNumBytes(), last_diff = diff; };<|fim▁end|>
/* ntop->getTrace()->traceEvent(TRACE_WARNING, "%s\t%u [%u][%u]", name, diff, num_requests.getNumBytes(), last_num_requests);
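update_stats() derives the trend by comparing the latest per-interval delta against the previous one. A small sketch of that comparison, assuming the same semantics as the ternary in the C++ above:

#[derive(Debug, PartialEq)]
enum Trend {
    Up,
    Down,
    Stable,
}

fn trend(diff: u32, last_diff: u32) -> Trend {
    if diff > last_diff {
        Trend::Up
    } else if diff < last_diff {
        Trend::Down
    } else {
        Trend::Stable
    }
}

fn main() {
    assert_eq!(trend(10, 5), Trend::Up);
    assert_eq!(trend(3, 5), Trend::Down);
    assert_eq!(trend(5, 5), Trend::Stable);
}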
<|file_name|>resources.rs<|end_file_name|><|fim▁begin|>/*! Resource Directory. See [here](../../resources/index.html) for the API docs. # Examples ``` # #![allow(unused_variables)] use pelite::pe64::{Pe, PeFile}; use pelite::resources::FindError; # #[allow(dead_code)] fn example<'a>(file: PeFile<'a>) -> Result<&'a [u8], FindError> { // Access the resources<|fim▁hole|> // Find the desired resource by its path let data = resources.find_data("/Manifest/2/1033")?; let manifest = data.bytes()?; Ok(manifest) } ``` */ #[cfg(test)] pub(crate) fn test<'a, P: super::Pe<'a>>(pe: P) -> crate::Result<()> { pe.resources().and_then(crate::resources::test) }<|fim▁end|>
let resources = file.resources()?;