prompt
large_stringlengths
70
991k
completion
large_stringlengths
0
1.02k
<|file_name|>selectFieldFormFields.ts<|end_file_name|><|fim▁begin|>/// <reference path="../boolean/booleanFieldMetadataModel.ts" /> /// <reference path="../date/dateFieldMetadataModel.ts" /><|fim▁hole|>/// <reference path="../select/selectFieldMetadataModel.ts" /> /// <reference path="../textarea/textareaFieldMetadataModel.ts" /> /// <reference path="../text/textFieldMetadataModel.ts" /> /// <reference path="./../../../data/createFieldFormFields.ts" /> /// <reference path="../../../models/core/entityMetadataModel.ts" /> /// <reference path="../../../interfaces/localization/IResources.ts" /> /// <reference path="../../../services/localization/localizationService.ts" /> /// <reference path="../../../../config/config.ts" /> module Data { 'use strict'; export class CreateSelectFieldFormFields { public static GetData():Models.EntityMetadata { var selectFields:Models.EntityMetadata = Data.CreateFieldFormFields.GetData(null); selectFields.Fields = selectFields.Fields.concat([ CreateSelectFieldFormFields.ValuesField(), CreateSelectFieldFormFields.DefaultValueField() ]); return selectFields; } private static ValuesField():Models.ListFieldMetadata { var result:Models.ListFieldMetadata = new Models.ListFieldMetadata(); result.FieldSystemName = 'Values'; result.FieldName = Services.LocalizationService.Resources.Values; result.FieldDescription = result.FieldName; result.Required = true; return result; } private static DefaultValueField():Models.TextFieldMetadata { var result:Models.TextFieldMetadata = new Models.TextFieldMetadata(); result.FieldSystemName = 'DefaultValue'; result.FieldName = Services.LocalizationService.Resources.DefaultValue; result.FieldDescription = result.FieldName; result.Required = false; return result; } } }<|fim▁end|>
/// <reference path="../list/listFieldMetadataModel.ts" />
<|file_name|>bolly.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python3 from bollinger import bands, plot, strategies import argparse parser = argparse.ArgumentParser(description="plots bollinger bands or suggests investments", epilog="example: bolly.py plot AMZN FB") parser.add_argument("action", metavar="ACTION", choices=["plot", "suggest"], help="either plot or suggest") parser.add_argument("symbols", metavar="SYMBOL", nargs="+", help="stock symbols") parser.add_argument("-s", "--strategy", choices=["uponce", "downonce", "moreup", "moredown"], default="moredown", help="selects invesment strategy") args = parser.parse_args() if args.action == "plot": for symbol in args.symbols: print("plot [ %s ]: " %(symbol), end="") b = bands.Bands(symbol) b.fetch() try: p = plot.Plot(b)<|fim▁hole|> except Exception as ex: print("FAIL: (%s)"%(ex)) if args.action == "suggest": for symbol in args.symbols: print("suggest [ %s ]: " %(symbol), end="") b = bands.Bands(symbol) b.fetch() try: if args.strategy == "uponce": s = strategies.UpOnce(b) elif args.strategy == "downonce": s = strategies.DownOnce(b) elif args.strategy == "moreup": s = strategies.MoreUp(b) elif args.strategy == "moredown": s = strategies.MoreDown(b) print("YES" if s.invest() else "NO") except Exception as ex: print("FAIL: (%s)"%(ex))<|fim▁end|>
p.save() print("OK")
<|file_name|>jlink.py<|end_file_name|><|fim▁begin|># Copyright (c) 2017 Linaro Limited. # # SPDX-License-Identifier: Apache-2.0 '''Runner for debugging with JLink.''' from os import path import os from .core import ZephyrBinaryRunner, get_env_or_bail DEFAULT_JLINK_GDB_PORT = 2331 class JLinkBinaryRunner(ZephyrBinaryRunner): '''Runner front-end for the J-Link GDB server.''' def __init__(self, device, gdbserver='JLinkGDBServer', iface='swd', elf_name=None, gdb=None, gdb_port=DEFAULT_JLINK_GDB_PORT, tui=None, debug=False): super(JLinkBinaryRunner, self).__init__(debug=debug) self.device = device self.gdbserver_cmd = [gdbserver] self.iface = iface self.elf_name = elf_name self.gdb_cmd = [gdb] if gdb is not None else None self.gdb_port = gdb_port self.tui_arg = [tui] if tui is not None else [] def replaces_shell_script(shell_script, command): return (command in {'debug', 'debugserver'} and shell_script == 'jlink.sh') def create_from_env(command, debug): '''Create runner from environment. Required: - JLINK_DEVICE: device name Required for 'debug': - GDB: gdb to use - O: build output directory - KERNEL_ELF_NAME: zephyr kernel binary in ELF format Optional for 'debug': - TUI: if present, passed to gdb server used to flash Optional for 'debug', 'debugserver': - JLINK_GDBSERVER: default is JLinkGDBServer - GDB_PORT: default is 2331 - JLINK_IF: default is swd ''' device = get_env_or_bail('JLINK_DEVICE') gdb = os.environ.get('GDB', None) o = os.environ.get('O', None) elf = os.environ.get('KERNEL_ELF_NAME', None) elf_name = None if o is not None: if elf is not None: elf_name = path.join(o, elf) tui = os.environ.get('TUI', None) gdbserver = os.environ.get('JLINK_GDBSERVER', 'JLinkGDBServer') gdb_port = int(os.environ.get('GDB_PORT', str(DEFAULT_JLINK_GDB_PORT))) iface = os.environ.get('JLINK_IF', 'swd') return JLinkBinaryRunner(device, gdbserver=gdbserver, iface=iface, elf_name=elf_name, gdb=gdb, gdb_port=gdb_port, tui=tui, debug=debug) def print_gdbserver_message(self): 
print('JLink GDB server running on port {}'.format(self.gdb_port)) def run(self, command, **kwargs): if command not in {'debug', 'debugserver'}: raise ValueError('{} is not supported'.format(command)) server_cmd = (self.gdbserver_cmd + ['-port', str(self.gdb_port), '-if', self.iface, '-device', self.device, '-silent', '-singlerun']) if command == 'debugserver': self.print_gdbserver_message() self.check_call(server_cmd) else: if self.gdb_cmd is None: raise ValueError('Cannot debug; gdb is missing') if self.elf_name is None: raise ValueError('Cannot debug; elf is missing') client_cmd = (self.gdb_cmd + self.tui_arg + [self.elf_name] + ['-ex', 'target remote :{}'.format(self.gdb_port), '-ex', 'monitor halt', '-ex', 'load',<|fim▁hole|> self.print_gdbserver_message() self.run_server_and_client(server_cmd, client_cmd)<|fim▁end|>
'-ex', 'monitor reset'])
<|file_name|>section_objective.py<|end_file_name|><|fim▁begin|># Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file> # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> # Created By: [email protected] # Maintained By: [email protected] from ggrc import db from .mixins import Mapping class SectionObjective(Mapping, db.Model): __tablename__ = 'section_objectives' @staticmethod def _extra_table_args(cls): return ( db.UniqueConstraint('section_id', 'objective_id'), db.Index('ix_section_id', 'section_id'), db.Index('ix_objective_id', 'objective_id'), ) section_id = db.Column(db.Integer, db.ForeignKey('sections.id'), nullable = False) objective_id = db.Column(db.Integer, db.ForeignKey('objectives.id'), nullable = False) _publish_attrs = [ 'section', 'objective', ] @classmethod def eager_query(cls): from sqlalchemy import orm <|fim▁hole|> def _display_name(self): return self.section.display_name + '<->' + self.objective.display_name<|fim▁end|>
query = super(SectionObjective, cls).eager_query() return query.options( orm.subqueryload('section'), orm.subqueryload('objective'))
<|file_name|>functions.py<|end_file_name|><|fim▁begin|>from collections import defaultdict from six import iteritems def invert_mapping(mapping): """ Invert a mapping dictionary Parameters ---------- mapping: dict Returns ------- """ inverted_mapping = defaultdict(list) for key, value in mapping.items(): if isinstance(value, (list, set)): for element in value: inverted_mapping[element].append(key) else: inverted_mapping[value].append(key) return inverted_mapping def generate_copy_id(base_id, collection, suffix="_copy"): """ Generate a new id that is not present in collection Parameters ---------- base_id: str, Original id while copying or New for new entries collection: dict or list suffix: str, Suffix that is added to the base id Returns ------- """ composite_id = str(base_id) + suffix new_id = composite_id n = 0 # Make sure there is no metabolite with the same id while new_id in collection: # Add number to end of id n += 1 new_id = composite_id + str(n) return new_id def get_annotation_to_item_map(list_of_items): """ Find model items with overlapping annotations Parameters ---------- item list_of_items Returns ------- """ annotation_to_item = defaultdict(list) for item in list_of_items: for annotation in item.annotation: annotation_to_item[annotation].append(item) return annotation_to_item def convert_to_bool(input_str): """ Convert string of boolean value to actual bolean PyQt5 stores boolean values as strings 'true' and 'false in the settings. In order to use those stored values they need to be converted back to the boolean values. 
Parameters ---------- input_str: str Returns ------- bool """ mapping = {"true": True, "false": False, "none": None} if isinstance(input_str, bool): return input_str elif not isinstance(input_str, str): raise TypeError("Input should be a string or boolean") else: return mapping[input_str.lower()] def check_charge_balance(metabolites): """ Check charge balance of the reaction """ # Check that charge is set for all metabolites if not all(x.charge is not None for x in metabolites.keys()): return None else: return sum([metabolite.charge * coefficient for metabolite, coefficient in iteritems(metabolites)]) def check_element_balance(metabolites): """ Check that the reaction is elementally balanced """ metabolite_elements = defaultdict(int) for metabolite, coefficient in iteritems(metabolites): for element, count in iteritems(metabolite.elements): metabolite_elements[element] += coefficient * count return {k: v for k, v in iteritems(metabolite_elements) if v != 0} def reaction_string(stoichiometry, use_metabolite_names=True): """Generate the reaction string """ attrib = "id" if use_metabolite_names: attrib = "name" educts = [(str(abs(value)), getattr(key, attrib)) for key, value in iteritems(stoichiometry) if value < 0.] products = [(str(abs(value)), getattr(key, attrib)) for key, value in iteritems(stoichiometry) if value > 0.] 
return " + ".join([" ".join(x) for x in educts])+" --> "+" + ".join([" ".join(x) for x in products]) def unbalanced_metabolites_to_string(in_dict): substrings = ['{0}: {1:.1f}'.format(*x) for x in in_dict.items()] return "<br>".join(substrings) def reaction_balance(metabolites): """ Check the balancing status of the stoichiometry Parameters ---------- metabolites : dict - Dictionary of metabolites with stoichiometric coefficnets Returns ------- charge_str : str or bool element_str : str or bool balanced : str or bool """ element_result = check_element_balance(metabolites) charge_result = check_charge_balance(metabolites) if charge_result is None: charge_str = "Unknown" elif charge_result == 0: charge_str = "OK" else: charge_str = str(charge_result) if not all(x.formula for x in metabolites.keys()): element_str = "Unknown" elif element_result == {}: element_str = "OK" else: element_str = unbalanced_metabolites_to_string(element_result) if len(metabolites) < 2: balanced = None elif element_str == "OK" and charge_str == "OK": balanced = True elif element_str not in ("OK", "Unknown") or charge_str not in ("OK", "Unknown"): balanced = False else: balanced = "Unknown" return charge_str, element_str, balanced def merge_groups_by_overlap(data): """ Merge sets Parameters ---------- data: list Returns ------- """ new_index = list(range(len(data))) mapping = dict() data = [set(m) for m in data] # Iterate over groups in data and merge groups # to the one with the lowest index for i, group in enumerate(data): for element in group: if element not in mapping: # Element has not been seen before # point element to current index mapping[element] = i continue else: # Get the new location location of the group # to which the element mapping points destination = new_location(new_index, mapping[element]) if destination == i: # Group has already been merged continue elif destination > i: # Merge to lowest index always destination, i = i, destination # Merge current group with the one # 
the item has been found in before data[destination].update(data[i]) data[i] = None # Store new index of group new_index[i] = destination i = destination # Filter out the empty groups return [g for g in data if g] def new_location(new_index, n): """ Find new location Iteratively follow pointers to new location. Parameters ---------- new_index: list, Should be initialized from range n: int Returns ------- int """ while new_index[n] != n: n = new_index[n] return n def unpack(iterable, cls): """ Unpack the value Parameters ---------- iterable cls Returns ------- """ if len(iterable) == 1: return iterable.pop() else: return cls(iterable) def text_is_different(input, state): """ Check if the input is different from output Test if the input is different to the output while ignoring the difference between None and empty string. Parameters ---------- input: str or None state: str Returns<|fim▁hole|> bool """ if not input and not state: return False else: return input != state def restore_state(object, state): """ Restore the state of an object Parameters ---------- object: Object which should be restored state: State from settings Returns ------- """ if state: object.restoreState(state) def restore_geometry(object, state): """ Restore the geometry of an object Parameters ---------- object: Object which should be restored state: State from settings Returns ------- """ if state: object.restoreGeometry(state) def split_dict_by_value(dictionary): """ Split dictionary by values This functions splits dictionary entries based on the value into positive, negative and zero dictionaries. 
Parameters ---------- dictionary: dict, Input dictionary Returns ------- positive: dict, Dictionary containg all items with positive value negative: dict, Dictionary cotaining all items with negative value zero: dict, Dictionary containing all items with zero value """ positive, negative, zero = {}, {}, {} for k, v in dictionary.items(): if v > 0.: positive[k] = v elif v < 0.: negative[k] = v else: zero[k] = v return positive, negative, zero<|fim▁end|>
-------
<|file_name|>bitstring.rs<|end_file_name|><|fim▁begin|>// The MIT License (MIT) // // Copyright (c) 2015 dinowernli // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. use std::string::String; use std::string::ToString; #[derive(Copy, Clone, Debug, PartialEq)] pub enum Bit { Zero, One, } /// Basic representation of a sequence of bits. pub struct Bitstring { // TODO(dinowernli): Investigate storing a Vec<u64> instead // and doing the conversoin to {Zero, One} upon request. bits: Vec<Bit>, } /// A type representing a sequence of Bits. impl Bitstring { pub fn new() -> Self { return Bitstring { bits: Vec::new(), }; } pub fn create_empty() -> Self { Bitstring::new()<|fim▁hole|> /// representation. The length of the resulting Bitstring is the /// smallest sequence of bits which can represent the value, i.e., /// log2(ceil(value)). 
pub fn create_from_u64(value: u64) -> Self { if value == 0 { return Bitstring::create_from_bits(vec!(Bit::Zero)); } // We compute the bit values by repeatedly getting the least // significant bit and then shifting. Since pushing to the end of // a vector is more efficient than pushing to the start, we keep // appending more significant bits and then flip everything once // at the end. let mut bits = Vec::new(); let mut remaining = value; while remaining > 0 { let least = remaining & 0x1; bits.push(if least == 1 { Bit::One } else { Bit::Zero }); remaining = remaining >> 1; } bits.reverse(); return Bitstring::create_from_bits(bits); } /// Returns a bitstring created from a sequence of characters, each of which /// must be either '0' or '1'. Panics if a bad string is passed. pub fn create_from_string(value: &str) -> Self { let mut bits = Vec::new(); for b in value.chars() { bits.push(match b { '0' => Bit::Zero, '1' => Bit::One, _ => panic!("Could not extract bitstring from {}", value), }); } return Bitstring::create_from_bits(bits); } fn create_from_bits(bits: Vec<Bit>) -> Self { return Bitstring { bits: bits }; } pub fn bits(&self) -> &Vec<Bit> { &self.bits } pub fn len(&self) -> usize { self.bits().len() } pub fn bit(&self, i: usize) -> Bit { self.bits[i] } pub fn push(&mut self, bit: Bit) { self.bits.push(bit); } /// Only valid if this bit string is not empty. pub fn pop(&mut self) -> Bit { return self.bits.pop().unwrap(); } } impl ToString for Bitstring { /// Returns the string representation of the bitstring, i.e., the /// string representation of 5 is '101'. fn to_string(&self) -> String { let mut result = String::new(); for bit in self.bits() { result.push(match *bit { Bit::Zero => '0', Bit::One => '1', }); } return result; } }<|fim▁end|>
} /// Encodes the supplied value as a Bitstring by taking its binary
<|file_name|>BusOutAction.java<|end_file_name|><|fim▁begin|>package com.simplegame.server.bus.client.io.action; import javax.annotation.Resource; import com.simplegame.core.action.annotation.ActionMapping; import com.simplegame.core.action.annotation.ActionWorker; import com.simplegame.core.message.Message; import com.simplegame.server.bus.client.io.command.ClientIoCommands; import com.simplegame.server.bus.client.io.service.IIoService; @ActionWorker<|fim▁hole|> @Resource private IIoService ioService; @ActionMapping(mapping = ClientIoCommands.ROLE_OUT) public void roleOut(Message message) { this.ioService.roleOut(message.getRoleId()); } }<|fim▁end|>
public class BusOutAction {
<|file_name|>generatetime.ts<|end_file_name|><|fim▁begin|>import { AsyncIterableX } from '../../asynciterable/asynciterablex'; import { generateTime as generateTimeStatic } from '../../asynciterable/generatetime';<|fim▁hole|>/** @nocollapse */ AsyncIterableX.generateTime = generateTimeStatic; declare module '../../asynciterable/asynciterablex' { // eslint-disable-next-line no-shadow namespace AsyncIterableX { export let generateTime: typeof generateTimeStatic; } }<|fim▁end|>
<|file_name|>models.py<|end_file_name|><|fim▁begin|>import mongoengine as db class BaseObject(db.Document): meta = {'allow_inheritance': True} <|fim▁hole|> revision = db.IntField(default=1)<|fim▁end|>
name = db.StringField(required=True) tags = db.ListField(db.StringField())
<|file_name|>showDashboard.py<|end_file_name|><|fim▁begin|>import json from app.api import bp from app.easyCI.scheduler import GitlabCIScheduler from flask import current_app, url_for, make_response, request from werkzeug.local import LocalProxy logger = LocalProxy(lambda: current_app.logger) @bp.route('/dashboard/', methods=['GET']) def dashboard(): url = url_for('api.dashboard') scheduler = GitlabCIScheduler() data = scheduler.get_pipelines() if not data: response = make_response() response.headers['Location'] = url response.status_code = 204 return response info = []<|fim▁hole|> break if i == 0: pipeline_id = pipeline["id"] pipeline_vars = scheduler.get_pipeline_vars(pipeline_id) print(pipeline_id, pipeline_vars) info.append({ "id" : pipeline["id"], "status" : pipeline["status"], "created_at": pipeline["created_at"], "updated_at": pipeline["updated_at"] }) return json.dumps(info), 200, {'Content-Type': 'text/json'}<|fim▁end|>
for (i, pipeline) in enumerate(data): if i > 10:
<|file_name|>HttpAPIRequest.java<|end_file_name|><|fim▁begin|>package de.hsmainz.pubapp.geocoder.controller; import com.google.gson.Gson; import de.hsmainz.pubapp.geocoder.model.ClientInputJson; import de.hsmainz.pubapp.geocoder.model.ErrorJson; import de.hsmainz.pubapp.geocoder.model.geojson.GeoJsonCollection; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import java.util.Locale; import java.util.ResourceBundle; /** * Interface for all different geocoder APIs * * @author Arno * @since 15.12.2016 */ public abstract class HttpAPIRequest { //**************************************** // CONSTANTS //**************************************** static final ResourceBundle lables = ResourceBundle.getBundle("lable", Locale.getDefault()); static final Logger logger = LogManager.getLogger(HttpGraphhopperRequest.class); //**************************************** // VARIABLES //**************************************** Gson gson = new Gson(); //**************************************** // INIT/CONSTRUCTOR //**************************************** //**************************************** // GETTER/SETTER //**************************************** //**************************************** // PUBLIC METHODS //**************************************** /** * Executes request to geocoder API and creates GeoJSON. 
Custom ClientJson is used for the input * * @param inputJson the request parameters combined in a custom ClientJson * @return API response converted to a String */ public String requestGeocoder(ClientInputJson inputJson) { String returnString; if (!validateInput(inputJson)) { returnString = gson.toJson(new ErrorJson(lables.getString("message_Input_Empty"))); } else { returnString = requestGeocoder(inputJson.getQueryString(), inputJson.getLocale()); } return returnString; } /** * Executes request to geocoder API and creates GeoJSON * * @param queryString the string containing the address * @param locale the string defining the used language * @return API response converted to a String */ public String requestGeocoder(String queryString, String locale) { String returnString; if (!validateInput(queryString)) { returnString = gson.toJson(new ErrorJson(lables.getString("message_Input_Empty"))); } else { try { URI uri = buildUri(queryString, locale); returnString = request(uri); } catch (URISyntaxException e) { logger.catching(e); returnString = gson.toJson(new ErrorJson(lables.getString("error_incorrect_URI"))); } } return returnString; } //**************************************** // PRIVATE METHODS //**************************************** /** * Creates the URI for API request * * @param queryString the string containing the address * @param locale the string defining the used language * @return Uri for geocoder request to graphhopper API */ abstract URI buildUri(String queryString, String locale) throws URISyntaxException; /** * Executes the request to the API * * @param uri the geocoder URL * @return the requested geoJSON * @throws throws an exception if the request fails */ abstract GeoJsonCollection doHttpGet(URI uri) throws IOException; /** * Method to catch exceptions and create ErrorJSONs * * @param uri * @return returns the GeoJSON or ErrorJSON as a String */ String request(URI uri) { String returnString; try { GeoJsonCollection geoJsonCollection = 
doHttpGet(uri); if (validateOutput(geoJsonCollection)) { returnString = gson.toJson(geoJsonCollection); } else { returnString = gson.toJson(new ErrorJson(lables.getString("message_no_location"))); } } catch (IOException e) { logger.catching(e); returnString = gson.toJson(new ErrorJson(lables.getString("error_API_request_Faild"))); } return returnString; } /** * validates the Input to reduce unnecessary request to API * * @param inputJson the InputJSON to be validated * @return returns true if InputJSON is valid */ boolean validateInput(ClientInputJson inputJson) { boolean returnValue = true; if (inputJson.getQueryString() == null || inputJson.getQueryString().isEmpty()) { returnValue = false; } if (inputJson.getLocale() == null || inputJson.getLocale().isEmpty()) { returnValue = false; } return returnValue; } /** * validates the Input to reduce unnecessary request to API * * @param inputString the Input String to be validated * @return true if Input String is not Empty */ boolean validateInput(String inputString) { boolean returnValue = true; if (inputString == null || inputString.isEmpty()) { returnValue = false; } return returnValue; } /** * validates the output from the API * * @param geoJsonCollection the API outputJSON to be validated * @return returns true if the outputJSON is not empty */ private boolean validateOutput(GeoJsonCollection geoJsonCollection) { return !geoJsonCollection.getFeatures().isEmpty(); } //**************************************** // INNER CLASSES<|fim▁hole|><|fim▁end|>
//**************************************** }
<|file_name|>oracle14.go<|end_file_name|><|fim▁begin|>// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build !go1.5 // Package oracle contains the implementation of the oracle tool whose // command-line is provided by golang.org/x/tools/cmd/oracle. // // http://golang.org/s/oracle-design // http://golang.org/s/oracle-user-manual // package oracle // import "golang.org/x/tools/oracle" // This file defines oracle.Query, the entry point for the oracle tool. // The actual executable is defined in cmd/oracle. // TODO(adonovan): new queries // - show all statements that may update the selected lvalue // (local, global, field, etc). // - show all places where an object of type T is created // (&T{}, var t T, new(T), new(struct{array [3]T}), etc. import ( "fmt" "go/ast" "go/build" "go/parser" "go/token" "io" "path/filepath" "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/loader" "golang.org/x/tools/go/pointer" "golang.org/x/tools/go/ssa" "golang.org/x/tools/go/types" "golang.org/x/tools/oracle/serial" ) type printfFunc func(pos interface{}, format string, args ...interface{}) // queryResult is the interface of each query-specific result type. type queryResult interface { toSerial(res *serial.Result, fset *token.FileSet) display(printf printfFunc) } // A QueryPos represents the position provided as input to a query: // a textual extent in the program's source code, the AST node it // corresponds to, and the package to which it belongs. // Instances are created by parseQueryPos. type queryPos struct { fset *token.FileSet start, end token.Pos // source extent of query path []ast.Node // AST path from query node to root of ast.File exact bool // 2nd result of PathEnclosingInterval info *loader.PackageInfo // type info for the queried package (nil for fastQueryPos) } // TypeString prints type T relative to the query position. 
func (qpos *queryPos) typeString(T types.Type) string { return types.TypeString(T, types.RelativeTo(qpos.info.Pkg)) } // ObjectString prints object obj relative to the query position. func (qpos *queryPos) objectString(obj types.Object) string { return types.ObjectString(obj, types.RelativeTo(qpos.info.Pkg)) } // SelectionString prints selection sel relative to the query position. func (qpos *queryPos) selectionString(sel *types.Selection) string { return types.SelectionString(sel, types.RelativeTo(qpos.info.Pkg)) } // A Query specifies a single oracle query. type Query struct { Mode string // query mode ("callers", etc) Pos string // query position Build *build.Context // package loading configuration // pointer analysis options Scope []string // main packages in (*loader.Config).FromArgs syntax PTALog io.Writer // (optional) pointer-analysis log file Reflection bool // model reflection soundly (currently slow). // Populated during Run() Fset *token.FileSet result queryResult } // Serial returns an instance of serial.Result, which implements the // {xml,json}.Marshaler interfaces so that query results can be // serialized as JSON or XML. // func (q *Query) Serial() *serial.Result { resj := &serial.Result{Mode: q.Mode} q.result.toSerial(resj, q.Fset) return resj } // WriteTo writes the oracle query result res to out in a compiler diagnostic format. func (q *Query) WriteTo(out io.Writer) { printf := func(pos interface{}, format string, args ...interface{}) { fprintf(out, q.Fset, pos, format, args...) } q.result.display(printf) } // Run runs an oracle query and populates its Fset and Result. 
func Run(q *Query) error { switch q.Mode { case "callees": return callees(q) case "callers": return callers(q) case "callstack": return callstack(q) case "peers": return peers(q) case "pointsto": return pointsto(q) case "whicherrs": return whicherrs(q) case "definition": return definition(q) case "describe": return describe(q) case "freevars": return freevars(q) case "implements": return implements(q) case "referrers": return referrers(q) case "what": return what(q) default: return fmt.Errorf("invalid mode: %q", q.Mode) } } func setPTAScope(lconf *loader.Config, scope []string) error { if len(scope) == 0 { return fmt.Errorf("no packages specified for pointer analysis scope") } // Determine initial packages for PTA. args, err := lconf.FromArgs(scope, true) if err != nil { return err } if len(args) > 0 { return fmt.Errorf("surplus arguments: %q", args) } return nil } // Create a pointer.Config whose scope is the initial packages of lprog // and their dependencies. func setupPTA(prog *ssa.Program, lprog *loader.Program, ptaLog io.Writer, reflection bool) (*pointer.Config, error) { // TODO(adonovan): the body of this function is essentially // duplicated in all go/pointer clients. Refactor. // For each initial package (specified on the command line), // if it has a main function, analyze that, // otherwise analyze its tests, if any. var testPkgs, mains []*ssa.Package for _, info := range lprog.InitialPackages() { initialPkg := prog.Package(info.Pkg) // Add package to the pointer analysis scope. 
if initialPkg.Func("main") != nil { mains = append(mains, initialPkg) } else { testPkgs = append(testPkgs, initialPkg) } } if testPkgs != nil { if p := prog.CreateTestMainPackage(testPkgs...); p != nil { mains = append(mains, p) } } if mains == nil { return nil, fmt.Errorf("analysis scope has no main and no tests") } return &pointer.Config{ Log: ptaLog, Reflection: reflection, Mains: mains, }, nil } // importQueryPackage finds the package P containing the // query position and tells conf to import it. // It returns the package's path. func importQueryPackage(pos string, conf *loader.Config) (string, error) { fqpos, err := fastQueryPos(pos) if err != nil { return "", err // bad query } filename := fqpos.fset.File(fqpos.start).Name() // This will not work for ad-hoc packages // such as $GOROOT/src/net/http/triv.go. // TODO(adonovan): ensure we report a clear error. _, importPath, err := guessImportPath(filename, conf.Build) if err != nil { return "", err // can't find GOPATH dir } if importPath == "" { return "", fmt.Errorf("can't guess import path from %s", filename) } // Check that it's possible to load the queried package. // (e.g. oracle tests contain different 'package' decls in same dir.) // Keep consistent with logic in loader/util.go! cfg2 := *conf.Build cfg2.CgoEnabled = false bp, err := cfg2.Import(importPath, "", 0) if err != nil { return "", err // no files for package } switch pkgContainsFile(bp, filename) { case 'T': conf.ImportWithTests(importPath) case 'X': conf.ImportWithTests(importPath) importPath += "_test" // for TypeCheckFuncBodies case 'G': conf.Import(importPath) default: return "", fmt.Errorf("package %q doesn't contain file %s", importPath, filename) } conf.TypeCheckFuncBodies = func(p string) bool { return p == importPath } return importPath, nil } // pkgContainsFile reports whether file was among the packages Go // files, Test files, eXternal test files, or not found. 
func pkgContainsFile(bp *build.Package, filename string) byte { for i, files := range [][]string{bp.GoFiles, bp.TestGoFiles, bp.XTestGoFiles} { for _, file := range files { if sameFile(filepath.Join(bp.Dir, file), filename) { return "GTX"[i] } } } return 0 // not found } // ParseQueryPos parses the source query position pos and returns the // AST node of the loaded program lprog that it identifies. // If needExact, it must identify a single AST subtree; // this is appropriate for queries that allow fairly arbitrary syntax, // e.g. "describe". // func parseQueryPos(lprog *loader.Program, posFlag string, needExact bool) (*queryPos, error) { filename, startOffset, endOffset, err := parsePosFlag(posFlag) if err != nil { return nil, err } start, end, err := findQueryPos(lprog.Fset, filename, startOffset, endOffset) if err != nil { return nil, err } info, path, exact := lprog.PathEnclosingInterval(start, end) if path == nil { return nil, fmt.Errorf("no syntax here") } if needExact && !exact { return nil, fmt.Errorf("ambiguous selection within %s", astutil.NodeDescription(path[0])) } return &queryPos{lprog.Fset, start, end, path, exact, info}, nil } // ---------- Utilities ---------- // allowErrors causes type errors to be silently ignored. // (Not suitable if SSA construction follows.) func allowErrors(lconf *loader.Config) { ctxt := *lconf.Build // copy ctxt.CgoEnabled = false lconf.Build = &ctxt lconf.AllowErrors = true // AllErrors makes the parser always return an AST instead of // bailing out after 10 errors and returning an empty ast.File. lconf.ParserMode = parser.AllErrors lconf.TypeChecker.Error = func(err error) {} } // ptrAnalysis runs the pointer analysis and returns its result. 
func ptrAnalysis(conf *pointer.Config) *pointer.Result { result, err := pointer.Analyze(conf) if err != nil { panic(err) // pointer analysis internal error } return result<|fim▁hole|> func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) } // deref returns a pointer's element type; otherwise it returns typ. func deref(typ types.Type) types.Type { if p, ok := typ.Underlying().(*types.Pointer); ok { return p.Elem() } return typ } // fprintf prints to w a message of the form "location: message\n" // where location is derived from pos. // // pos must be one of: // - a token.Pos, denoting a position // - an ast.Node, denoting an interval // - anything with a Pos() method: // ssa.Member, ssa.Value, ssa.Instruction, types.Object, pointer.Label, etc. // - a QueryPos, denoting the extent of the user's query. // - nil, meaning no position at all. // // The output format is is compatible with the 'gnu' // compilation-error-regexp in Emacs' compilation mode. // TODO(adonovan): support other editors. // func fprintf(w io.Writer, fset *token.FileSet, pos interface{}, format string, args ...interface{}) { var start, end token.Pos switch pos := pos.(type) { case ast.Node: start = pos.Pos() end = pos.End() case token.Pos: start = pos end = start case interface { Pos() token.Pos }: start = pos.Pos() end = start case *queryPos: start = pos.start end = pos.end case nil: // no-op default: panic(fmt.Sprintf("invalid pos: %T", pos)) } if sp := fset.Position(start); start == end { // (prints "-: " for token.NoPos) fmt.Fprintf(w, "%s: ", sp) } else { ep := fset.Position(end) // The -1 below is a concession to Emacs's broken use of // inclusive (not half-open) intervals. // Other editors may not want it. // TODO(adonovan): add an -editor=vim|emacs|acme|auto // flag; auto uses EMACS=t / VIM=... / etc env vars. fmt.Fprintf(w, "%s:%d.%d-%d.%d: ", sp.Filename, sp.Line, sp.Column, ep.Line, ep.Column-1) } fmt.Fprintf(w, format, args...) io.WriteString(w, "\n") }<|fim▁end|>
}
<|file_name|>FrequencySummarizer.py<|end_file_name|><|fim▁begin|>from nltk.tokenize import sent_tokenize,word_tokenize from nltk.corpus import stopwords from collections import defaultdict from string import punctuation from heapq import nlargest import re """ Modified from http://glowingpython.blogspot.co.uk/2014/09/text-summarization-with-nltk.html """ class FrequencySummarizer: def __init__(self, low_thresh=0.1, high_thresh=0.9): """ Initialize the text summarizer. Words that have a frequency term lower than low_thresh or higer than high_thresh will be ignored. """ ignore = ['fig','figure','ibid', 'et al','cf','NB','N.B.'] self._low_thresh = low_thresh self._high_thresh = high_thresh self._stopwords = set(stopwords.words('english') + list(punctuation) + list(ignore)) def _compute_frequencies(self, word_tk): freq = defaultdict(int) for s in word_tk: for word in s: if word not in self._stopwords: freq[word] += 1 # frequencies normalization and fitering m = float(max(freq.values()))<|fim▁hole|> if freq[w] >= self._high_thresh or freq[w] <= self._low_thresh: del freq[w] return freq def summarize(self, text, n): """ Return a list of n sentences which represent the summary of text. """ text = "".join([unicode(x) for x in text]) sents = sent_tokenize(text) if n > len(sents): n = len(sents) word_tk = [word_tokenize(s.lower()) for s in sents] self._freq = self._compute_frequencies(word_tk) ranking = defaultdict(int) for i,sent in enumerate(word_tk): for w in sent: if w in self._freq and len(w)>4: #Only count words of length>4 as significant ranking[i] += self._freq[w] sentsindx = self._rank(ranking, n) return [sents[j].encode('ascii', errors='backslashreplace') for j in sentsindx] def _rank(self, ranking, n): """ return the first n sentences with highest ranking """ return nlargest(n, ranking, key=ranking.get)<|fim▁end|>
for w in freq.keys(): freq[w] = freq[w]/m
<|file_name|>HelloWorld.rs<|end_file_name|><|fim▁begin|>fn main() {<|fim▁hole|> println!("How are you?") }<|fim▁end|>
println!("Hello world!");
<|file_name|>test_hybrid_recommender.py<|end_file_name|><|fim▁begin|># This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. """ Test cases for the TAAR Hybrid recommender """ from taar.recommenders.hybrid_recommender import CuratedRecommender from taar.recommenders.hybrid_recommender import HybridRecommender from taar.recommenders.ensemble_recommender import EnsembleRecommender from taar.recommenders.s3config import TAAR_WHITELIST_BUCKET, TAAR_WHITELIST_KEY # from taar.recommenders.hybrid_recommender import ENSEMBLE_WEIGHTS from .test_ensemblerecommender import install_mock_ensemble_data from .mocks import MockRecommenderFactory import json from moto import mock_s3 import boto3 def install_no_curated_data(ctx): ctx = ctx.child() conn = boto3.resource("s3", region_name="us-west-2") conn.create_bucket(Bucket=TAAR_WHITELIST_BUCKET) conn.Object(TAAR_WHITELIST_BUCKET, TAAR_WHITELIST_KEY).put(Body="") return ctx def install_mock_curated_data(ctx): mock_data = [] for i in range(20): mock_data.append(str(i) * 16) ctx = ctx.child() conn = boto3.resource("s3", region_name="us-west-2") conn.create_bucket(Bucket=TAAR_WHITELIST_BUCKET) conn.Object(TAAR_WHITELIST_BUCKET, TAAR_WHITELIST_KEY).put( Body=json.dumps(mock_data) ) return ctx def install_ensemble_fixtures(ctx): ctx = install_mock_ensemble_data(ctx) factory = MockRecommenderFactory() ctx["recommender_factory"] = factory ctx["recommender_map"] = { "collaborative": factory.create("collaborative"), "similarity": factory.create("similarity"), "locale": factory.create("locale"), } ctx["ensemble_recommender"] = EnsembleRecommender(ctx.child()) return ctx @mock_s3 def test_curated_can_recommend(test_ctx): ctx = install_no_curated_data(test_ctx) r = CuratedRecommender(ctx) # CuratedRecommender will always recommend something no matter # what assert r.can_recommend({}) assert 
r.can_recommend({"installed_addons": []}) @mock_s3 def test_curated_recommendations(test_ctx): ctx = install_mock_curated_data(test_ctx) r = CuratedRecommender(ctx) # CuratedRecommender will always recommend something no matter # what for LIMIT in range(1, 5): guid_list = r.recommend({"client_id": "000000"}, limit=LIMIT) # The curated recommendations should always return with some kind # of recommendations assert len(guid_list) == LIMIT @mock_s3 def test_hybrid_recommendations(test_ctx): # verify that the recommendations mix the curated and # ensemble results ctx = install_mock_curated_data(test_ctx) ctx = install_ensemble_fixtures(ctx) r = HybridRecommender(ctx) # Test that we can generate lists of results for LIMIT in range(4, 8): guid_list = r.recommend({"client_id": "000000"}, limit=LIMIT) # The curated recommendations should always return with some kind # of recommendations assert len(guid_list) == LIMIT # Test that the results are actually mixed guid_list = r.recommend({"client_id": "000000"}, limit=4) # A mixed list will have two recommendations with weight > 1.0 # (ensemble) and 2 with exactly weight 1.0 from the curated list <|fim▁hole|> assert guid_list[1][1] > 1.0 assert guid_list[2][1] == 1.0 assert guid_list[3][1] == 1.0<|fim▁end|>
assert guid_list[0][1] > 1.0
<|file_name|>SQL.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 # Copyright 2009-2017 BHG http://bw.org/ import sqlite3 def main(): print('connect') db = sqlite3.connect('db-api.db') cur = db.cursor() print('create') cur.execute("DROP TABLE IF EXISTS test") cur.execute(""" CREATE TABLE test ( id INTEGER PRIMARY KEY, string TEXT, number INTEGER ) """) print('insert row') cur.execute(""" INSERT INTO test (string, number) VALUES ('one', 1) """) print('insert row') cur.execute(""" INSERT INTO test (string, number) VALUES ('two', 2) """) print('insert row') <|fim▁hole|> INSERT INTO test (string, number) VALUES ('three', 3) """) print('commit') db.commit() print('count') cur.execute("SELECT COUNT(*) FROM test") count = cur.fetchone()[0] print(f'there are {count} rows in the table.') print('read') for row in cur.execute("SELECT * FROM test"): print(row) print('drop') cur.execute("DROP TABLE test") print('close') db.close() if __name__ == '__main__': main()<|fim▁end|>
cur.execute("""
<|file_name|>api.go<|end_file_name|><|fim▁begin|>// Copyright 2016 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. package api import ( "errors" "fmt" "io" "net/http" "regexp" "strings" "sync" "github.com/lab2528/go-oneTime/common" "github.com/lab2528/go-oneTime/log" "github.com/lab2528/go-oneTime/swarm/storage" ) var ( hashMatcher = regexp.MustCompile("^[0-9A-Fa-f]{64}") slashes = regexp.MustCompile("/+") domainAndVersion = regexp.MustCompile("[@:;,]+") ) type Resolver interface { Resolve(string) (common.Hash, error) } /* Api implements webserver/file system related content storage and retrieval on top of the dpa it is the public interface of the dpa which is included in the ethereum stack */ type Api struct { dpa *storage.DPA dns Resolver } //the api constructor initialises func NewApi(dpa *storage.DPA, dns Resolver) (self *Api) { self = &Api{ dpa: dpa, dns: dns, } return } // DPA reader API func (self *Api) Retrieve(key storage.Key) storage.LazySectionReader { return self.dpa.Retrieve(key) } func (self *Api) Store(data io.Reader, size int64, wg *sync.WaitGroup) (key storage.Key, err error) { return self.dpa.Store(data, size, wg, nil) } type ErrResolve error // DNS Resolver func (self *Api) Resolve(uri *URI) (storage.Key, error) { 
log.Trace(fmt.Sprintf("Resolving : %v", uri.Addr)) if hashMatcher.MatchString(uri.Addr) { log.Trace(fmt.Sprintf("addr is a hash: %q", uri.Addr)) return storage.Key(common.Hex2Bytes(uri.Addr)), nil }<|fim▁hole|> return nil, fmt.Errorf("unable to resolve addr %q, resolver not configured", uri.Addr) } hash, err := self.dns.Resolve(uri.Addr) if err != nil { log.Warn(fmt.Sprintf("DNS error resolving addr %q: %s", uri.Addr, err)) return nil, ErrResolve(err) } log.Trace(fmt.Sprintf("addr lookup: %v -> %v", uri.Addr, hash)) return hash[:], nil } // Put provides singleton manifest creation on top of dpa store func (self *Api) Put(content, contentType string) (storage.Key, error) { r := strings.NewReader(content) wg := &sync.WaitGroup{} key, err := self.dpa.Store(r, int64(len(content)), wg, nil) if err != nil { return nil, err } manifest := fmt.Sprintf(`{"entries":[{"hash":"%v","contentType":"%s"}]}`, key, contentType) r = strings.NewReader(manifest) key, err = self.dpa.Store(r, int64(len(manifest)), wg, nil) if err != nil { return nil, err } wg.Wait() return key, nil } // Get uses iterative manifest retrieval and prefix matching // to resolve path to content using dpa retrieve // it returns a section reader, mimeType, status and an error func (self *Api) Get(key storage.Key, path string) (reader storage.LazySectionReader, mimeType string, status int, err error) { trie, err := loadManifest(self.dpa, key, nil) if err != nil { log.Warn(fmt.Sprintf("loadManifestTrie error: %v", err)) return } log.Trace(fmt.Sprintf("getEntry(%s)", path)) entry, _ := trie.getEntry(path) if entry != nil { key = common.Hex2Bytes(entry.Hash) status = entry.Status mimeType = entry.ContentType log.Trace(fmt.Sprintf("content lookup key: '%v' (%v)", key, mimeType)) reader = self.dpa.Retrieve(key) } else { status = http.StatusNotFound err = fmt.Errorf("manifest entry for '%s' not found", path) log.Warn(fmt.Sprintf("%v", err)) } return } func (self *Api) Modify(key storage.Key, path, contentHash, 
contentType string) (storage.Key, error) { quitC := make(chan bool) trie, err := loadManifest(self.dpa, key, quitC) if err != nil { return nil, err } if contentHash != "" { entry := newManifestTrieEntry(&ManifestEntry{ Path: path, ContentType: contentType, }, nil) entry.Hash = contentHash trie.addEntry(entry, quitC) } else { trie.deleteEntry(path, quitC) } if err := trie.recalcAndStore(); err != nil { return nil, err } return trie.hash, nil }<|fim▁end|>
if uri.Immutable() { return nil, errors.New("refusing to resolve immutable address") } if self.dns == nil {
<|file_name|>gen_rtl.py<|end_file_name|><|fim▁begin|># Copyright lowRISC contributors. # Licensed under the Apache License, Version 2.0, see LICENSE for details. # SPDX-License-Identifier: Apache-2.0 """Generate SystemVerilog designs from IpBlock object""" import logging as log import os from typing import Dict, Optional, Tuple from mako import exceptions # type: ignore from mako.template import Template # type: ignore from pkg_resources import resource_filename from .ip_block import IpBlock from .lib import check_int from .multi_register import MultiRegister from .reg_base import RegBase from .register import Register def escape_name(name: str) -> str: return name.lower().replace(' ', '_') def make_box_quote(msg: str, indent: str = ' ') -> str: hr = indent + ('/' * (len(msg) + 6)) middle = indent + '// ' + msg + ' //' return '\n'.join([hr, middle, hr]) def _get_awparam_name(iface_name: Optional[str]) -> str: return (iface_name or 'Iface').capitalize() + 'Aw' def get_addr_widths(block: IpBlock) -> Dict[Optional[str], Tuple[str, int]]: '''Return the address widths for the device interfaces Returns a dictionary keyed by interface name whose values are pairs: (paramname, width) where paramname is IfaceAw for an unnamed interface and FooAw for an interface called foo. This is constructed in the same order as block.reg_blocks. If there is a single device interface and that interface is unnamed, use the more general parameter name "BlockAw". 
''' assert block.reg_blocks if len(block.reg_blocks) == 1 and None in block.reg_blocks: return {None: ('BlockAw', block.reg_blocks[None].get_addr_width())} return {name: (_get_awparam_name(name), rb.get_addr_width()) for name, rb in block.reg_blocks.items()} def get_type_name_pfx(block: IpBlock, iface_name: Optional[str]) -> str: return block.name.lower() + ('' if iface_name is None else '_{}'.format(iface_name.lower())) def get_r0(reg: RegBase) -> Register: '''Get a Register representing an entry in the RegBase''' if isinstance(reg, Register): return reg else: assert isinstance(reg, MultiRegister) return reg.reg def get_iface_tx_type(block: IpBlock, iface_name: Optional[str], hw2reg: bool) -> str: x2x = 'hw2reg' if hw2reg else 'reg2hw' pfx = get_type_name_pfx(block, iface_name) return '_'.join([pfx, x2x, 't']) def get_reg_tx_type(block: IpBlock, reg: RegBase, hw2reg: bool) -> str: '''Get the name of the hw2reg or reg2hw type for reg''' if isinstance(reg, Register): r0 = reg type_suff = 'reg_t' else: assert isinstance(reg, MultiRegister) r0 = reg.reg type_suff = 'mreg_t' x2x = 'hw2reg' if hw2reg else 'reg2hw' return '_'.join([block.name.lower(), x2x, r0.name.lower(), type_suff]) def gen_rtl(block: IpBlock, outdir: str) -> int: # Read Register templates reg_top_tpl = Template( filename=resource_filename('reggen', 'reg_top.sv.tpl')) reg_pkg_tpl = Template( filename=resource_filename('reggen', 'reg_pkg.sv.tpl')) # Generate <block>_reg_pkg.sv # # This defines the various types used to interface between the *_reg_top # module(s) and the block itself. reg_pkg_path = os.path.join(outdir, block.name.lower() + "_reg_pkg.sv") with open(reg_pkg_path, 'w', encoding='UTF-8') as fout: try: fout.write(reg_pkg_tpl.render(block=block)) except: # noqa F722 for template Exception handling log.error(exceptions.text_error_template().render()) return 1 # Generate the register block implementation(s). 
For a device interface # with no name we generate the register module "<block>_reg_top" (writing # to <block>_reg_top.sv). In any other case, we also need the interface # name, giving <block>_<ifname>_reg_top. lblock = block.name.lower() for if_name, rb in block.reg_blocks.items(): if if_name is None: mod_base = lblock else: mod_base = lblock + '_' + if_name.lower() mod_name = mod_base + '_reg_top' reg_top_path = os.path.join(outdir, mod_name + '.sv') with open(reg_top_path, 'w', encoding='UTF-8') as fout: try: fout.write(reg_top_tpl.render(block=block, mod_base=mod_base, mod_name=mod_name, if_name=if_name, rb=rb)) except: # noqa F722 for template Exception handling log.error(exceptions.text_error_template().render()) return 1 return 0 <|fim▁hole|> '''Render a parameter value as used for the destination type The value is itself a string but we have already checked that if dst_type happens to be "int" or "int unsigned" then it can be parsed as an integer. If dst_type is "int unsigned" and the value is larger than 2^31 then explicitly generate a 32-bit hex value. This allows 32-bit literals whose top bits are set (which can't be written as bare integers in SystemVerilog without warnings, because those are interpreted as ints). ''' if dst_type == 'int unsigned': # This shouldn't fail because we've already checked it in # _parse_parameter in params.py int_val = check_int(value, "integer parameter") if int_val >= (1 << 31): return "32'h{:08x}".format(int_val) return value<|fim▁end|>
def render_param(dst_type: str, value: str) -> str:
<|file_name|>karma.conf.js<|end_file_name|><|fim▁begin|><|fim▁hole|>'use strict'; module.exports = function(config) { config.set({ files: [ 'tests/main.js', {pattern: 'app/js/**/*.js', included: false}, {pattern: 'app/bower_components/**/*.js', included: false}, {pattern: 'tests/specs/**/*.js', included: false}, {pattern: 'tests/fixtures/**/*.js', included: false} ], basePath: '../', frameworks: ['jasmine', 'requirejs'], reporters: ['progress'], runnerPort: 9000, singleRun: true, browsers: ['PhantomJS', 'Chrome'], logLevel: 'ERROR' }); };<|fim▁end|>
<|file_name|>LanguageService.js<|end_file_name|><|fim▁begin|>app .service('LanguageService', function LanguageService(ExchangeService) { this.translate = (label) => ExchangeService.i18n().__(label);<|fim▁hole|><|fim▁end|>
});
<|file_name|>event_simulator.py<|end_file_name|><|fim▁begin|>from collections import defaultdict import copy import datetime import json from appengine_fixture_loader.loader import load_fixture from google.appengine.ext import ndb from helpers.event_details_manipulator import EventDetailsManipulator from helpers.match_helper import MatchHelper from helpers.match_manipulator import MatchManipulator from models.event import Event from models.event_details import EventDetails from models.match import Match class EventSimulator(object): """ Steps through an event in time. At step = 0, only the Event exists: (step 0) Add all unplayed qual matches (step 1, substep n) Add results of each of the n qual matches + rankings being updated (if has_event_details) (step 2) Add alliance selections (if has_event_details) (step 3) Add unplayed QF matches (step 4, substep n) Add results of each of the n QF matches + update SF matches with advancing alliances (if not batch_advance) + update alliance selection backups (if has_event_details) (step 5) Add unplayed SF matches (if batch_advance) (step 6, substep n) Add results of each of the n SF matches + update F matches with advancing alliances (if not batch_advance) + update alliance selection backups (if has_event_details) (step 7) Add unplayed F matches (if batch_advance) (step 8, substep n) Add results of each of the n F matches + update alliance selection backups (if has_event_details) """ def __init__(self, has_event_details=True, batch_advance=False): self._step = 0 self._substep = 0 # whether to update rankings and alliance selections self._has_event_details = has_event_details # whether to update next playoff level all at once, or as winners are determined self._batch_advance = batch_advance # Load and save complete data load_fixture('test_data/fixtures/2016nytr_event_team_status.json', kind={'EventDetails': EventDetails, 'Event': Event, 'Match': Match}, post_processor=self._event_key_adder) event = Event.get_by_id('2016nytr') 
# Add 3rd matches that never got played unplayed_matches = [ Match( id='2016nytr_qf1m3', year=2016, event=event.key, comp_level='qf', set_number=1, match_number=3, alliances_json=json.dumps({ 'red': { 'teams': ['frc3990', 'frc359', 'frc4508'], 'score': -1, }, 'blue': { 'teams': ['frc3044', 'frc4930', 'frc4481'], 'score': -1, } }), time=datetime.datetime(2016, 3, 19, 18, 34), ), Match( id='2016nytr_qf3m3', year=2016, event=event.key, comp_level='qf', set_number=3, match_number=3, alliances_json=json.dumps({ 'red': { 'teams': ['frc20', 'frc5254', 'frc229'], 'score': -1, }, 'blue': { 'teams': ['frc3003', 'frc358', 'frc527'], 'score': -1, } }), time=datetime.datetime(2016, 3, 19, 18, 48), ), Match( id='2016nytr_sf1m3', year=2016, event=event.key, comp_level='sf', set_number=1, match_number=3, alliances_json=json.dumps({ 'red': { 'teams': ['frc3990', 'frc359', 'frc4508'], 'score': -1, }, 'blue': { 'teams': ['frc5240', 'frc3419', 'frc663'], 'score': -1, } }), time=datetime.datetime(2016, 3, 19, 19, 42), ) ] self._event_details = event.details self._alliance_selections_without_backup = copy.deepcopy(event.details.alliance_selections) self._alliance_selections_without_backup[1]['backup'] = None self._played_matches = MatchHelper.organizeMatches(event.matches) self._all_matches = MatchHelper.organizeMatches(event.matches + unplayed_matches) # Delete data event.details.key.delete() ndb.delete_multi([match.key for match in event.matches]) ndb.get_context().clear_cache() # Used to keep track of non-batch advancement self._advancement_alliances = defaultdict(dict) def _event_key_adder(self, obj): obj.event = ndb.Key(Event, '2016nytr') def _update_rankings(self): """ Generates and saves fake rankings """ event = Event.get_by_id('2016nytr') team_wins = defaultdict(int) team_losses = defaultdict(int) team_ties = defaultdict(int) teams = set() for match in event.matches: if match.comp_level == 'qm': for alliance in ['red', 'blue']: for team in match.alliances[alliance]['teams']: 
teams.add(team) if match.has_been_played: if alliance == match.winning_alliance: team_wins[team] += 1 elif match.winning_alliance == '': team_ties[team] += 1 else: team_losses[team] += 1 rankings = [] for team in sorted(teams): wins = team_wins[team] losses = team_losses[team] ties = team_ties[team] rankings.append({ 'team_key': team, 'record': { 'wins': wins, 'losses': losses, 'ties': ties, }, 'matches_played': wins + losses + ties, 'dq': 0, 'sort_orders': [2 * wins + ties, 0, 0, 0, 0], 'qual_average': None, }) rankings = sorted(rankings, key=lambda r: -r['sort_orders'][0]) for i, ranking in enumerate(rankings): ranking['rank'] = i + 1 EventDetailsManipulator.createOrUpdate(EventDetails( id='2016nytr', rankings2=rankings, )) def step(self): event = Event.get_by_id('2016nytr') if self._step == 0: # Qual match schedule added for match in copy.deepcopy(self._all_matches['qm']): for alliance in ['red', 'blue']: match.alliances[alliance]['score'] = -1 match.alliances_json = json.dumps(match.alliances) match.score_breakdown_json = None match.actual_time = None MatchManipulator.createOrUpdate(match) self._step += 1 elif self._step == 1: # After each qual match MatchManipulator.createOrUpdate(self._played_matches['qm'][self._substep]) if self._substep < len(self._played_matches['qm']) - 1: self._substep += 1 else: self._step += 1 self._substep = 0 EventDetailsManipulator.createOrUpdate(EventDetails(id='2016nytr')) elif self._step == 2: # After alliance selections EventDetailsManipulator.createOrUpdate(EventDetails( id='2016nytr', alliance_selections=self._alliance_selections_without_backup )) self._step += 1 elif self._step == 3: # QF schedule added for match in copy.deepcopy(self._all_matches['qf']): for alliance in ['red', 'blue']: match.alliances[alliance]['score'] = -1 match.alliances_json = json.dumps(match.alliances) match.score_breakdown_json = None match.actual_time = None MatchManipulator.createOrUpdate(match) self._step += 1 elif self._step == 4: # After each QF 
match new_match = MatchHelper.play_order_sort_matches(self._played_matches['qf'])[self._substep] MatchManipulator.createOrUpdate(new_match) if not self._batch_advance: win_counts = { 'red': 0, 'blue': 0, } for i in xrange(new_match.match_number): win_counts[Match.get_by_id( Match.renderKeyName( new_match.event.id(), new_match.comp_level, new_match.set_number, i+1)).winning_alliance] += 1 for alliance, wins in win_counts.items(): if wins == 2: s = new_match.set_number if s in {1, 2}: self._advancement_alliances['sf1']['red' if s == 1 else 'blue'] = new_match.alliances[alliance]['teams'] elif s in {3, 4}: self._advancement_alliances['sf2']['red' if s == 3 else 'blue'] = new_match.alliances[alliance]['teams'] else: raise Exception("Invalid set number: {}".format(s)) for match_set, alliances in self._advancement_alliances.items(): if match_set.startswith('sf'): for i in xrange(3): for match in copy.deepcopy(self._all_matches['sf']): key = '2016nytr_{}m{}'.format(match_set, i+1) if match.key.id() == key: for color in ['red', 'blue']: match.alliances[color]['score'] = -1 match.alliances[color]['teams'] = alliances.get(color, []) match.alliances_json = json.dumps(match.alliances) match.score_breakdown_json = None match.actual_time = None MatchManipulator.createOrUpdate(match) if self._substep < len(self._played_matches['qf']) - 1: self._substep += 1 else: self._step += 1 if self._batch_advance else 2 self._substep = 0 elif self._step == 5: # SF schedule added if self._batch_advance: for match in copy.deepcopy(self._all_matches['sf']): for alliance in ['red', 'blue']: match.alliances[alliance]['score'] = -1 match.alliances_json = json.dumps(match.alliances) match.score_breakdown_json = None match.actual_time = None MatchManipulator.createOrUpdate(match) self._step += 1 elif self._step == 6: # After each SF match new_match = MatchHelper.play_order_sort_matches(self._played_matches['sf'])[self._substep] MatchManipulator.createOrUpdate(new_match) if not self._batch_advance: 
win_counts = { 'red': 0, 'blue': 0, }<|fim▁hole|> for i in xrange(new_match.match_number): win_counts[Match.get_by_id( Match.renderKeyName( new_match.event.id(), new_match.comp_level, new_match.set_number, i+1)).winning_alliance] += 1 for alliance, wins in win_counts.items(): if wins == 2: self._advancement_alliances['f1']['red' if new_match.set_number == 1 else 'blue'] = new_match.alliances[alliance]['teams'] for match_set, alliances in self._advancement_alliances.items(): if match_set.startswith('f'): for i in xrange(3): for match in copy.deepcopy(self._all_matches['f']): key = '2016nytr_{}m{}'.format(match_set, i+1) if match.key.id() == key: for color in ['red', 'blue']: match.alliances[color]['score'] = -1 match.alliances[color]['teams'] = alliances.get(color, []) match.alliances_json = json.dumps(match.alliances) match.score_breakdown_json = None match.actual_time = None MatchManipulator.createOrUpdate(match) # Backup robot introduced if self._substep == 3: EventDetailsManipulator.createOrUpdate(EventDetails( id='2016nytr', alliance_selections=self._event_details.alliance_selections )) if self._substep < len(self._played_matches['sf']) - 1: self._substep += 1 else: self._step += 1 if self._batch_advance else 2 self._substep = 0 elif self._step == 7: # F schedule added if self._batch_advance: for match in copy.deepcopy(self._all_matches['f']): for alliance in ['red', 'blue']: match.alliances[alliance]['score'] = -1 match.alliances_json = json.dumps(match.alliances) match.score_breakdown_json = None match.actual_time = None MatchManipulator.createOrUpdate(match) self._step += 1 elif self._step == 8: # After each F match MatchManipulator.createOrUpdate( MatchHelper.play_order_sort_matches( self._played_matches['f'])[self._substep]) if self._substep < len(self._played_matches['f']) - 1: self._substep += 1 else: self._step += 1 self._substep = 0 ndb.get_context().clear_cache() # Re fetch event matches event = Event.get_by_id('2016nytr') 
MatchHelper.deleteInvalidMatches(event.matches) ndb.get_context().clear_cache() self._update_rankings()<|fim▁end|>
<|file_name|>textencoder.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use dom::bindings::codegen::Bindings::TextEncoderBinding; use dom::bindings::codegen::Bindings::TextEncoderBinding::TextEncoderMethods; use dom::bindings::global::GlobalRef; use dom::bindings::error::Fallible; use dom::bindings::error::Error::Range; use dom::bindings::js::{JSRef, Temporary}; use dom::bindings::str::USVString; use dom::bindings::utils::{Reflector, reflect_dom_object}; use util::str::DOMString; use std::borrow::ToOwned; use std::ascii::AsciiExt; use std::ptr;<|fim▁hole|>use encoding::{Encoding, EncoderTrap}; use encoding::label::encoding_from_whatwg_label; use libc::uint8_t; use js::jsapi::{JSContext, JSObject}; use js::jsfriendapi::bindgen::{JS_NewUint8Array, JS_GetUint8ArrayData}; #[dom_struct] pub struct TextEncoder { reflector_: Reflector, encoding: DOMString, encoder: EncodingRef, } impl TextEncoder { fn new_inherited(encoding: DOMString, encoder: EncodingRef) -> TextEncoder { TextEncoder { reflector_: Reflector::new(), encoding: encoding, encoder: encoder, } } pub fn new(global: GlobalRef, encoding: DOMString, encoder: EncodingRef) -> Temporary<TextEncoder> { reflect_dom_object(box TextEncoder::new_inherited(encoding, encoder), global, TextEncoderBinding::Wrap) } // https://encoding.spec.whatwg.org/#dom-textencoder pub fn Constructor(global: GlobalRef, label: DOMString) -> Fallible<Temporary<TextEncoder>> { let encoding = match encoding_from_whatwg_label(&label.trim().to_ascii_lowercase()) { Some(enc) => enc, None => { debug!("Encoding Label Not Supported"); return Err(Range("The given encoding is not supported.".to_owned())) } }; match encoding.name() { "utf-8" | "utf-16be" | "utf-16le" => { Ok(TextEncoder::new(global, encoding.name().to_owned(), encoding)) } _ => { debug!("Encoding Not 
UTF"); return Err(Range("The encoding must be utf-8, utf-16le, or utf-16be.".to_owned())) } } } } impl<'a> TextEncoderMethods for JSRef<'a, TextEncoder> { // https://encoding.spec.whatwg.org/#dom-textencoder-encoding fn Encoding(self) -> DOMString { self.encoding.clone() } // https://encoding.spec.whatwg.org/#dom-textencoder-encode #[allow(unsafe_code)] fn Encode(self, cx: *mut JSContext, input: USVString) -> *mut JSObject { unsafe { let output = self.encoder.encode(&input.0, EncoderTrap::Strict).unwrap(); let length = output.len() as u32; let js_object: *mut JSObject = JS_NewUint8Array(cx, length); let js_object_data: *mut uint8_t = JS_GetUint8ArrayData(js_object, cx); ptr::copy_nonoverlapping(js_object_data, output.as_ptr(), length as usize); return js_object; } } }<|fim▁end|>
use encoding::types::EncodingRef;
<|file_name|>scrambler.py<|end_file_name|><|fim▁begin|># Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # Lint as: python3 """Simple scrambling test generator.""" import copy import random from typing import List, Text, Optional from lit_nlp.api import components as lit_components from lit_nlp.api import dataset as lit_dataset from lit_nlp.api import model as lit_model from lit_nlp.api import types from lit_nlp.lib import utils JsonDict = types.JsonDict FIELDS_TO_SCRAMBLE_KEY = 'Fields to scramble' class Scrambler(lit_components.Generator): """Scramble all words in an example to generate a new example.""" @staticmethod def scramble(val: Text) -> Text: words = val.split(' ') random.shuffle(words) return ' '.join(words) def config_spec(self) -> types.Spec: return { FIELDS_TO_SCRAMBLE_KEY: types.MultiFieldMatcher( spec='input', types=['TextSegment'], select_all=True), } def generate(self, example: JsonDict, model: lit_model.Model, dataset: lit_dataset.Dataset, config: Optional[JsonDict] = None) -> List[JsonDict]: """Naively scramble all words in an example. Note: Even if more than one field is to be scrambled, only a single example will be produced, unlike other generators which will produce multiple examples, one per field. Args: example: the example used for basis of generated examples. model: the model. dataset: the dataset. config: user-provided config properties. 
Returns: examples: a list of generated examples. """ del model # Unused. config = config or {} # If config key is missing, generate no examples.<|fim▁hole|> # TODO(lit-dev): move this to generate_all(), so we read the spec once # instead of on every example. text_keys = utils.find_spec_keys(dataset.spec(), types.TextSegment) if not text_keys: return [] text_keys = [key for key in text_keys if key in fields_to_scramble] new_example = copy.deepcopy(example) for text_key in text_keys: new_example[text_key] = self.scramble(example[text_key]) return [new_example]<|fim▁end|>
fields_to_scramble = list(config.get(FIELDS_TO_SCRAMBLE_KEY, [])) if not fields_to_scramble: return []
<|file_name|>canvas.cpp<|end_file_name|><|fim▁begin|>/** * This file is part of RT2D, a 2D OpenGL framework. * * - Copyright 2017 Rik Teerling <[email protected]> * - Initial commit */ #include "canvas.h" Canvas::Canvas() : Entity() { this->init(16); } Canvas::Canvas(int pixelsize) : Entity() { this->init(pixelsize); } Canvas::~Canvas() { } void Canvas::update(float deltaTime) { }<|fim▁hole|>{ this->position = Point2(SWIDTH/2, SHEIGHT/2); this->scale = Point2(pixelsize, pixelsize); // width, height, bitdepth, filter, wrap PixelBuffer tmp = PixelBuffer(SWIDTH/pixelsize, SHEIGHT/pixelsize, 4, 0, 0); this->addDynamicSprite(&tmp); // get the pixels from the texture and make the framebuffer point to it this->_framebuffer = this->sprite()->texture()->pixels(); this->_width = SWIDTH / pixelsize; this->_height = SHEIGHT / pixelsize; backgroundcolor = RGBAColor(0, 0, 0, 0); this->fill(backgroundcolor); } void Canvas::setPixel(int x, int y, RGBAColor color) { this->_framebuffer->setPixel(x, y, color); } RGBAColor Canvas::getPixel(int x, int y) { return this->_framebuffer->getPixel(x, y); } void Canvas::clearPixel(int x, int y) { this->_framebuffer->setPixel(x, y, backgroundcolor); } void Canvas::fill(RGBAColor color) { // fill framebuffer with color for (long y=0; y<_framebuffer->height; y++) { for (long x=0; x<_framebuffer->width; x++) { this->setPixel(x, y, color); } } } void Canvas::drawSprite(const PixelSprite& spr) { size_t s = spr.pixels.size(); for (size_t i = 0; i < s; i++) { this->setPixel(spr.pixels[i].position.x + spr.position.x, spr.pixels[i].position.y + spr.position.y, spr.pixels[i].color); } } void Canvas::clearSprite(const PixelSprite& spr) { size_t s = spr.pixels.size(); for (size_t i = 0; i < s; i++) { this->clearPixel(spr.pixels[i].position.x + spr.position.x, spr.pixels[i].position.y + spr.position.y); } } void Canvas::drawLine(Vector2f from, Vector2f to, RGBAColor color) { float x0 = from.x; float y0 = from.y; float x1 = to.x; float y1 = to.y; bool steep 
= false; if (std::abs(x0-x1) < std::abs(y0-y1)) { std::swap(x0, y0); std::swap(x1, y1); steep = true; } if (x0 > x1) { std::swap(x0, x1); std::swap(y0, y1); } int dx = x1-x0; int dy = y1-y0; int derror2 = std::abs(dy)*2; int error2 = 0; int y = y0; for (int x = x0; x <= x1; x++) { if (steep) { this->setPixel(y, x, color); } else { this->setPixel(x, y, color); } error2 += derror2; if (error2 > dx) { y += (y1 > y0 ? 1 : -1); error2 -= dx*2; } } }<|fim▁end|>
void Canvas::init(int pixelsize)
<|file_name|>plot.py<|end_file_name|><|fim▁begin|>import locale import sys from matplotlib import pylab as plt from scipy.interpolate import interp1d import numpy def parse_pRDF(f): pRDFs={} count=0 for line in open(f).readlines(): words=line.split() if words[0]=="dstep": dstep=locale.atof(words[1]) continue elif words[0]=="nstep": nstep=locale.atof(words[1]) continue else: atom_type = words[0] pRDF=[] for word in words[1:]: pRDF.append(locale.atof(word)) pRDFs[atom_type]=pRDF return (pRDFs,dstep) f=sys.argv[1]<|fim▁hole|>X = numpy.linspace(0, 10+dstep, int(10/dstep)+1) scale=3 Xf = numpy.linspace(0, 10+dstep, (int(10/dstep)+1)*scale) for atom_type, pRDF in pRDFs.iteritems(): Y=numpy.array(pRDF) #f = interp1d(X,Y,kind='cubic') plt.plot(X[:-1],Y[:-1],colors[atom_type], label=atom_type) #plt.plot(Xf[:-scale],f(Xf)[:-scale],colors[atom_type], label=atom_type) plt.hold(True) plt.legend() plt.xlabel("R (A)") plt.ylabel("protons/A3") plt.show()<|fim▁end|>
(pRDFs, dstep)=parse_pRDF(f) colors={"C":"k", "HC":"r", "N":"b", "HN":"g", "O":"m", "HO":"y", "S":"c"}
<|file_name|>TiffImagePlugin.py<|end_file_name|><|fim▁begin|># # The Python Imaging Library. # $Id$ # # TIFF file handling # # TIFF is a flexible, if somewhat aged, image file format originally # defined by Aldus. Although TIFF supports a wide variety of pixel # layouts and compression methods, the name doesn't really stand for # "thousands of incompatible file formats," it just feels that way. # # To read TIFF data from a stream, the stream must be seekable. For # progressive decoding, make sure to use TIFF files where the tag # directory is placed first in the file. # # History: # 1995-09-01 fl Created # 1996-05-04 fl Handle JPEGTABLES tag # 1996-05-18 fl Fixed COLORMAP support # 1997-01-05 fl Fixed PREDICTOR support # 1997-08-27 fl Added support for rational tags (from Perry Stoll) # 1998-01-10 fl Fixed seek/tell (from Jan Blom) # 1998-07-15 fl Use private names for internal variables # 1999-06-13 fl Rewritten for PIL 1.0 (1.0) # 2000-10-11 fl Additional fixes for Python 2.0 (1.1) # 2001-04-17 fl Fixed rewind support (seek to frame 0) (1.2) # 2001-05-12 fl Added write support for more tags (from Greg Couch) (1.3) # 2001-12-18 fl Added workaround for broken Matrox library # 2002-01-18 fl Don't mess up if photometric tag is missing (D. Alan Stewart) # 2003-05-19 fl Check FILLORDER tag # 2003-09-26 fl Added RGBa support # 2004-02-24 fl Added DPI support; fixed rational write support # 2005-02-07 fl Added workaround for broken Corel Draw 10 files # 2006-01-09 fl Added support for float/double tags (from Russell Nelson) # # Copyright (c) 1997-2006 by Secret Labs AB. All rights reserved. # Copyright (c) 1995-1997 by Fredrik Lundh # # See the README file for information on usage and redistribution. 
# __version__ = "1.3.5" import Image, ImageFile import array, string, sys import ImagePalette II = "II" # little-endian (intel-style) MM = "MM" # big-endian (motorola-style) try: if sys.byteorder == "little": native_prefix = II else: native_prefix = MM except AttributeError: if ord(array.array("i",[1]).tostring()[0]): native_prefix = II else: native_prefix = MM # # -------------------------------------------------------------------- # Read TIFF files def il16(c,o=0): return ord(c[o]) + (ord(c[o+1])<<8) def il32(c,o=0): return ord(c[o]) + (ord(c[o+1])<<8) + (ord(c[o+2])<<16) + (ord(c[o+3])<<24) def ol16(i): return chr(i&255) + chr(i>>8&255) def ol32(i): return chr(i&255) + chr(i>>8&255) + chr(i>>16&255) + chr(i>>24&255) def ib16(c,o=0): return ord(c[o+1]) + (ord(c[o])<<8) def ib32(c,o=0): return ord(c[o+3]) + (ord(c[o+2])<<8) + (ord(c[o+1])<<16) + (ord(c[o])<<24) def ob16(i): return chr(i>>8&255) + chr(i&255) def ob32(i): return chr(i>>24&255) + chr(i>>16&255) + chr(i>>8&255) + chr(i&255) # a few tag names, just to make the code below a bit more readable IMAGEWIDTH = 256 IMAGELENGTH = 257 BITSPERSAMPLE = 258 COMPRESSION = 259 PHOTOMETRIC_INTERPRETATION = 262 FILLORDER = 266 IMAGEDESCRIPTION = 270 STRIPOFFSETS = 273 SAMPLESPERPIXEL = 277 ROWSPERSTRIP = 278 STRIPBYTECOUNTS = 279 X_RESOLUTION = 282 Y_RESOLUTION = 283 PLANAR_CONFIGURATION = 284 RESOLUTION_UNIT = 296 SOFTWARE = 305 DATE_TIME = 306 ARTIST = 315 PREDICTOR = 317 COLORMAP = 320 TILEOFFSETS = 324 EXTRASAMPLES = 338 SAMPLEFORMAT = 339 JPEGTABLES = 347 COPYRIGHT = 33432 IPTC_NAA_CHUNK = 33723 # newsphoto properties PHOTOSHOP_CHUNK = 34377 # photoshop properties ICCPROFILE = 34675 EXIFIFD = 34665 XMP = 700 COMPRESSION_INFO = { # Compression => pil compression name 1: "raw", 2: "tiff_ccitt", 3: "group3", 4: "group4", 5: "tiff_lzw", 6: "tiff_jpeg", # obsolete 7: "jpeg", 32771: "tiff_raw_16", # 16-bit padding 32773: "packbits" } OPEN_INFO = { # (ByteOrder, PhotoInterpretation, SampleFormat, FillOrder, 
BitsPerSample, # ExtraSamples) => mode, rawmode (II, 0, 1, 1, (1,), ()): ("1", "1;I"), (II, 0, 1, 2, (1,), ()): ("1", "1;IR"), (II, 0, 1, 1, (8,), ()): ("L", "L;I"), (II, 0, 1, 2, (8,), ()): ("L", "L;IR"), (II, 1, 1, 1, (1,), ()): ("1", "1"), (II, 1, 1, 2, (1,), ()): ("1", "1;R"), (II, 1, 1, 1, (8,), ()): ("L", "L"), (II, 1, 1, 1, (8,8), (2,)): ("LA", "LA"), (II, 1, 1, 2, (8,), ()): ("L", "L;R"), (II, 1, 1, 1, (16,), ()): ("I;16", "I;16"), (II, 1, 2, 1, (16,), ()): ("I;16S", "I;16S"), (II, 1, 2, 1, (32,), ()): ("I", "I;32S"), (II, 1, 3, 1, (32,), ()): ("F", "F;32F"), (II, 2, 1, 1, (8,8,8), ()): ("RGB", "RGB"), (II, 2, 1, 2, (8,8,8), ()): ("RGB", "RGB;R"), (II, 2, 1, 1, (8,8,8,8), (0,)): ("RGBX", "RGBX"), (II, 2, 1, 1, (8,8,8,8), (1,)): ("RGBA", "RGBa"), (II, 2, 1, 1, (8,8,8,8), (2,)): ("RGBA", "RGBA"), (II, 2, 1, 1, (8,8,8,8), (999,)): ("RGBA", "RGBA"), # corel draw 10 (II, 3, 1, 1, (1,), ()): ("P", "P;1"), (II, 3, 1, 2, (1,), ()): ("P", "P;1R"), (II, 3, 1, 1, (2,), ()): ("P", "P;2"), (II, 3, 1, 2, (2,), ()): ("P", "P;2R"), (II, 3, 1, 1, (4,), ()): ("P", "P;4"), (II, 3, 1, 2, (4,), ()): ("P", "P;4R"), (II, 3, 1, 1, (8,), ()): ("P", "P"), (II, 3, 1, 1, (8,8), (2,)): ("PA", "PA"), (II, 3, 1, 2, (8,), ()): ("P", "P;R"), (II, 5, 1, 1, (8,8,8,8), ()): ("CMYK", "CMYK"), (II, 6, 1, 1, (8,8,8), ()): ("YCbCr", "YCbCr"), (II, 8, 1, 1, (8,8,8), ()): ("LAB", "LAB"), (MM, 0, 1, 1, (1,), ()): ("1", "1;I"), (MM, 0, 1, 2, (1,), ()): ("1", "1;IR"), (MM, 0, 1, 1, (8,), ()): ("L", "L;I"), (MM, 0, 1, 2, (8,), ()): ("L", "L;IR"), (MM, 1, 1, 1, (1,), ()): ("1", "1"), (MM, 1, 1, 2, (1,), ()): ("1", "1;R"), (MM, 1, 1, 1, (8,), ()): ("L", "L"), (MM, 1, 1, 1, (8,8), (2,)): ("LA", "LA"), (MM, 1, 1, 2, (8,), ()): ("L", "L;R"), (MM, 1, 1, 1, (16,), ()): ("I;16B", "I;16B"), (MM, 1, 2, 1, (16,), ()): ("I;16BS", "I;16BS"), (MM, 1, 2, 1, (32,), ()): ("I;32BS", "I;32BS"), (MM, 1, 3, 1, (32,), ()): ("F;32BF", "F;32BF"), (MM, 2, 1, 1, (8,8,8), ()): ("RGB", "RGB"), (MM, 2, 1, 2, (8,8,8), ()): ("RGB", 
"RGB;R"), (MM, 2, 1, 1, (8,8,8,8), (0,)): ("RGBX", "RGBX"), (MM, 2, 1, 1, (8,8,8,8), (1,)): ("RGBA", "RGBa"), (MM, 2, 1, 1, (8,8,8,8), (2,)): ("RGBA", "RGBA"), (MM, 2, 1, 1, (8,8,8,8), (999,)): ("RGBA", "RGBA"), # corel draw 10 (MM, 3, 1, 1, (1,), ()): ("P", "P;1"), (MM, 3, 1, 2, (1,), ()): ("P", "P;1R"), (MM, 3, 1, 1, (2,), ()): ("P", "P;2"), (MM, 3, 1, 2, (2,), ()): ("P", "P;2R"), (MM, 3, 1, 1, (4,), ()): ("P", "P;4"), (MM, 3, 1, 2, (4,), ()): ("P", "P;4R"), (MM, 3, 1, 1, (8,), ()): ("P", "P"), (MM, 3, 1, 1, (8,8), (2,)): ("PA", "PA"), (MM, 3, 1, 2, (8,), ()): ("P", "P;R"), (MM, 5, 1, 1, (8,8,8,8), ()): ("CMYK", "CMYK"), (MM, 6, 1, 1, (8,8,8), ()): ("YCbCr", "YCbCr"), (MM, 8, 1, 1, (8,8,8), ()): ("LAB", "LAB"), } PREFIXES = ["MM\000\052", "II\052\000", "II\xBC\000"] def _accept(prefix): return prefix[:4] in PREFIXES ## # Wrapper for TIFF IFDs. class ImageFileDirectory: # represents a TIFF tag directory. to speed things up, # we don't decode tags unless they're asked for. def __init__(self, prefix): self.prefix = prefix[:2] if self.prefix == MM: self.i16, self.i32 = ib16, ib32 self.o16, self.o32 = ob16, ob32 elif self.prefix == II: self.i16, self.i32 = il16, il32 self.o16, self.o32 = ol16, ol32 else: raise SyntaxError("not a TIFF IFD") self.reset() def reset(self): self.tags = {} self.tagdata = {} self.tagtype = {} # added 2008-06-05 by Florian Hoech self.next = None # dictionary API (sort of) def keys(self): return self.tagdata.keys() + self.tags.keys() def items(self): items = self.tags.items() for tag in self.tagdata.keys(): items.append((tag, self[tag])) return items def __len__(self): return len(self.tagdata) + len(self.tags) def __getitem__(self, tag): try: return self.tags[tag] except KeyError: type, data = self.tagdata[tag] # unpack on the fly size, handler = self.load_dispatch[type] self.tags[tag] = data = handler(self, data) del self.tagdata[tag] return data def get(self, tag, default=None): try: return self[tag] except KeyError: return default def 
getscalar(self, tag, default=None): try: value = self[tag] if len(value) != 1: if tag == SAMPLEFORMAT: # work around broken (?) matrox library # (from Ted Wright, via Bob Klimek) raise KeyError # use default raise ValueError, "not a scalar" return value[0] except KeyError: if default is None: raise return default def has_key(self, tag): return self.tags.has_key(tag) or self.tagdata.has_key(tag) def __setitem__(self, tag, value): if type(value) is not type(()): value = (value,) self.tags[tag] = value # load primitives load_dispatch = {} def load_byte(self, data): l = [] for i in range(len(data)): l.append(ord(data[i])) return tuple(l) load_dispatch[1] = (1, load_byte) def load_string(self, data): if data[-1:] == '\0': data = data[:-1] return data load_dispatch[2] = (1, load_string) def load_short(self, data): l = [] for i in range(0, len(data), 2): l.append(self.i16(data, i)) return tuple(l) load_dispatch[3] = (2, load_short) def load_long(self, data): l = [] for i in range(0, len(data), 4): l.append(self.i32(data, i)) return tuple(l) load_dispatch[4] = (4, load_long) def load_rational(self, data): l = [] for i in range(0, len(data), 8): l.append((self.i32(data, i), self.i32(data, i+4))) return tuple(l) load_dispatch[5] = (8, load_rational) def load_float(self, data): a = array.array("f", data) if self.prefix != native_prefix: a.byteswap() return tuple(a) load_dispatch[11] = (4, load_float) def load_double(self, data): a = array.array("d", data) if self.prefix != native_prefix: a.byteswap() return tuple(a) load_dispatch[12] = (8, load_double) <|fim▁hole|> def load(self, fp): # load tag dictionary self.reset() i16 = self.i16 i32 = self.i32 for i in range(i16(fp.read(2))): ifd = fp.read(12) tag, typ = i16(ifd), i16(ifd, 2) if Image.DEBUG: import TiffTags tagname = TiffTags.TAGS.get(tag, "unknown") typname = TiffTags.TYPES.get(typ, "unknown") print "tag: %s (%d)" % (tagname, tag), print "- type: %s (%d)" % (typname, typ), try: dispatch = self.load_dispatch[typ] except 
KeyError: if Image.DEBUG: print "- unsupported type", typ continue # ignore unsupported type size, handler = dispatch size = size * i32(ifd, 4) # Get and expand tag value if size > 4: here = fp.tell() fp.seek(i32(ifd, 8)) data = ImageFile._safe_read(fp, size) fp.seek(here) else: data = ifd[8:8+size] if len(data) != size: raise IOError, "not enough data" self.tagdata[tag] = typ, data self.tagtype[tag] = typ if Image.DEBUG: if tag in (COLORMAP, IPTC_NAA_CHUNK, PHOTOSHOP_CHUNK, ICCPROFILE, XMP): print "- value: <table: %d bytes>" % size else: print "- value:", self[tag] self.next = i32(fp.read(4)) # save primitives def save(self, fp): o16 = self.o16 o32 = self.o32 fp.write(o16(len(self.tags))) # always write in ascending tag order tags = self.tags.items() tags.sort() directory = [] append = directory.append offset = fp.tell() + len(self.tags) * 12 + 4 stripoffsets = None # pass 1: convert tags to binary format for tag, value in tags: typ = None if self.tagtype.has_key(tag): typ = self.tagtype[tag] if typ == 1: # byte data data = value = string.join(map(chr, value), "") elif typ == 7: # untyped data data = value = string.join(value, "") elif type(value[0]) is type(""): # string data typ = 2 data = value = string.join(value, "\0") + "\0" else: # integer data if tag == STRIPOFFSETS: stripoffsets = len(directory) typ = 4 # to avoid catch-22 elif tag in (X_RESOLUTION, Y_RESOLUTION): # identify rational data fields typ = 5 elif not typ: typ = 3 for v in value: if v >= 65536: typ = 4 if typ == 3: data = string.join(map(o16, value), "") else: data = string.join(map(o32, value), "") if Image.DEBUG: import TiffTags tagname = TiffTags.TAGS.get(tag, "unknown") typname = TiffTags.TYPES.get(typ, "unknown") print "save: %s (%d)" % (tagname, tag), print "- type: %s (%d)" % (typname, typ), if tag in (COLORMAP, IPTC_NAA_CHUNK, PHOTOSHOP_CHUNK, ICCPROFILE, XMP): size = len(data) print "- value: <table: %d bytes>" % size else: print "- value:", value # figure out if data fits into the 
directory if len(data) == 4: append((tag, typ, len(value), data, "")) elif len(data) < 4: append((tag, typ, len(value), data + (4-len(data))*"\0", "")) else: count = len(value) if typ == 5: count = count / 2 # adjust for rational data field append((tag, typ, count, o32(offset), data)) offset = offset + len(data) if offset & 1: offset = offset + 1 # word padding # update strip offset data to point beyond auxiliary data if stripoffsets is not None: tag, typ, count, value, data = directory[stripoffsets] assert not data, "multistrip support not yet implemented" value = o32(self.i32(value) + offset) directory[stripoffsets] = tag, typ, count, value, data # pass 2: write directory to file for tag, typ, count, value, data in directory: if Image.DEBUG > 1: print tag, typ, count, repr(value), repr(data) fp.write(o16(tag) + o16(typ) + o32(count) + value) # -- overwrite here for multi-page -- fp.write("\0\0\0\0") # end of directory # pass 3: write auxiliary data to file for tag, typ, count, value, data in directory: fp.write(data) if len(data) & 1: fp.write("\0") return offset ## # Image plugin for TIFF files. 
class TiffImageFile(ImageFile.ImageFile): format = "TIFF" format_description = "Adobe TIFF" def _open(self): "Open the first image in a TIFF file" # Header ifh = self.fp.read(8) if ifh[:4] not in PREFIXES: raise SyntaxError, "not a TIFF file" # image file directory (tag dictionary) self.tag = self.ifd = ImageFileDirectory(ifh[:2]) # setup frame pointers self.__first = self.__next = self.ifd.i32(ifh, 4) self.__frame = -1 self.__fp = self.fp # and load the first frame self._seek(0) def seek(self, frame): "Select a given frame as current image" if frame < 0: frame = 0 self._seek(frame) def tell(self): "Return the current frame number" return self._tell() def _seek(self, frame): self.fp = self.__fp if frame < self.__frame: # rewind file self.__frame = -1 self.__next = self.__first while self.__frame < frame: if not self.__next: raise EOFError, "no more images in TIFF file" self.fp.seek(self.__next) self.tag.load(self.fp) self.__next = self.tag.next self.__frame = self.__frame + 1 self._setup() def _tell(self): return self.__frame def _decoder(self, rawmode, layer): "Setup decoder contexts" args = None if rawmode == "RGB" and self._planar_configuration == 2: rawmode = rawmode[layer] compression = self._compression if compression == "raw": args = (rawmode, 0, 1) elif compression == "jpeg": args = rawmode, "" if self.tag.has_key(JPEGTABLES): # Hack to handle abbreviated JPEG headers self.tile_prefix = self.tag[JPEGTABLES] elif compression == "packbits": args = rawmode elif compression == "tiff_lzw": args = rawmode if self.tag.has_key(317): # Section 14: Differencing Predictor self.decoderconfig = (self.tag[PREDICTOR][0],) if self.tag.has_key(ICCPROFILE): self.info['icc_profile'] = self.tag[ICCPROFILE] return args def _setup(self): "Setup this image object based on current tags" if self.tag.has_key(0xBC01): raise IOError, "Windows Media Photo files not yet supported" getscalar = self.tag.getscalar # extract relevant tags self._compression = 
COMPRESSION_INFO[getscalar(COMPRESSION, 1)] self._planar_configuration = getscalar(PLANAR_CONFIGURATION, 1) # photometric is a required tag, but not everyone is reading # the specification photo = getscalar(PHOTOMETRIC_INTERPRETATION, 0) fillorder = getscalar(FILLORDER, 1) if Image.DEBUG: print "*** Summary ***" print "- compression:", self._compression print "- photometric_interpretation:", photo print "- planar_configuration:", self._planar_configuration print "- fill_order:", fillorder # size xsize = getscalar(IMAGEWIDTH) ysize = getscalar(IMAGELENGTH) self.size = xsize, ysize if Image.DEBUG: print "- size:", self.size format = getscalar(SAMPLEFORMAT, 1) # mode: check photometric interpretation and bits per pixel key = ( self.tag.prefix, photo, format, fillorder, self.tag.get(BITSPERSAMPLE, (1,)), self.tag.get(EXTRASAMPLES, ()) ) if Image.DEBUG: print "format key:", key try: self.mode, rawmode = OPEN_INFO[key] except KeyError: if Image.DEBUG: print "- unsupported format" raise SyntaxError, "unknown pixel mode" if Image.DEBUG: print "- raw mode:", rawmode print "- pil mode:", self.mode self.info["compression"] = self._compression xres = getscalar(X_RESOLUTION, (1, 1)) yres = getscalar(Y_RESOLUTION, (1, 1)) if xres and yres: xres = xres[0] / (xres[1] or 1) yres = yres[0] / (yres[1] or 1) resunit = getscalar(RESOLUTION_UNIT, 1) if resunit == 2: # dots per inch self.info["dpi"] = xres, yres elif resunit == 3: # dots per centimeter. 
convert to dpi self.info["dpi"] = xres * 2.54, yres * 2.54 else: # No absolute unit of measurement self.info["resolution"] = xres, yres # build tile descriptors x = y = l = 0 self.tile = [] if self.tag.has_key(STRIPOFFSETS): # striped image h = getscalar(ROWSPERSTRIP, ysize) w = self.size[0] a = None for o in self.tag[STRIPOFFSETS]: if not a: a = self._decoder(rawmode, l) self.tile.append( (self._compression, (0, min(y, ysize), w, min(y+h, ysize)), o, a)) y = y + h if y >= self.size[1]: x = y = 0 l = l + 1 a = None elif self.tag.has_key(TILEOFFSETS): # tiled image w = getscalar(322) h = getscalar(323) a = None for o in self.tag[TILEOFFSETS]: if not a: a = self._decoder(rawmode, l) self.tile.append( (self._compression, (x, y, x+w, y+h), o, a)) x = x + w if x >= self.size[0]: x, y = 0, y + h if y >= self.size[1]: x = y = 0 l = l + 1 a = None else: if Image.DEBUG: print "- unsupported data organization" raise SyntaxError("unknown data organization") # fixup palette descriptor if self.mode == "P": palette = map(lambda a: chr(a / 256), self.tag[COLORMAP]) self.palette = ImagePalette.raw("RGB;L", string.join(palette, "")) # # -------------------------------------------------------------------- # Write TIFF files # little endian is default except for image modes with explict big endian byte-order SAVE_INFO = { # mode => rawmode, byteorder, photometrics, sampleformat, bitspersample, extra "1": ("1", II, 1, 1, (1,), None), "L": ("L", II, 1, 1, (8,), None), "LA": ("LA", II, 1, 1, (8,8), 2), "P": ("P", II, 3, 1, (8,), None), "PA": ("PA", II, 3, 1, (8,8), 2), "I": ("I;32S", II, 1, 2, (32,), None), "I;16": ("I;16", II, 1, 1, (16,), None), "I;16S": ("I;16S", II, 1, 2, (16,), None), "F": ("F;32F", II, 1, 3, (32,), None), "RGB": ("RGB", II, 2, 1, (8,8,8), None), "RGBX": ("RGBX", II, 2, 1, (8,8,8,8), 0), "RGBA": ("RGBA", II, 2, 1, (8,8,8,8), 2), "CMYK": ("CMYK", II, 5, 1, (8,8,8,8), None), "YCbCr": ("YCbCr", II, 6, 1, (8,8,8), None), "LAB": ("LAB", II, 8, 1, (8,8,8), None), 
"I;32BS": ("I;32BS", MM, 1, 2, (32,), None), "I;16B": ("I;16B", MM, 1, 1, (16,), None), "I;16BS": ("I;16BS", MM, 1, 2, (16,), None), "F;32BF": ("F;32BF", MM, 1, 3, (32,), None), } def _cvt_res(value): # convert value to TIFF rational number -- (numerator, denominator) if type(value) in (type([]), type(())): assert(len(value) % 2 == 0) return value if type(value) == type(1): return (value, 1) value = float(value) return (int(value * 65536), 65536) def _save(im, fp, filename): try: rawmode, prefix, photo, format, bits, extra = SAVE_INFO[im.mode] except KeyError: raise IOError, "cannot write mode %s as TIFF" % im.mode ifd = ImageFileDirectory(prefix) # -- multi-page -- skip TIFF header on subsequent pages if fp.tell() == 0: # tiff header (write via IFD to get everything right) # PIL always starts the first IFD at offset 8 fp.write(ifd.prefix + ifd.o16(42) + ifd.o32(8)) ifd[IMAGEWIDTH] = im.size[0] ifd[IMAGELENGTH] = im.size[1] # additions written by Greg Couch, [email protected] # inspired by image-sig posting from Kevin Cazabon, [email protected] if hasattr(im, 'tag'): # preserve tags from original TIFF image file for key in (RESOLUTION_UNIT, X_RESOLUTION, Y_RESOLUTION): if im.tag.tagdata.has_key(key): ifd[key] = im.tag.tagdata.get(key) # preserve some more tags from original TIFF image file # -- 2008-06-06 Florian Hoech ifd.tagtype = im.tag.tagtype for key in (IPTC_NAA_CHUNK, PHOTOSHOP_CHUNK, XMP): if im.tag.has_key(key): ifd[key] = im.tag[key] # preserve ICC profile (should also work when saving other formats # which support profiles as TIFF) -- 2008-06-06 Florian Hoech if im.info.has_key("icc_profile"): ifd[ICCPROFILE] = im.info["icc_profile"] if im.encoderinfo.has_key("description"): ifd[IMAGEDESCRIPTION] = im.encoderinfo["description"] if im.encoderinfo.has_key("resolution"): ifd[X_RESOLUTION] = ifd[Y_RESOLUTION] \ = _cvt_res(im.encoderinfo["resolution"]) if im.encoderinfo.has_key("x resolution"): ifd[X_RESOLUTION] = _cvt_res(im.encoderinfo["x resolution"]) if 
im.encoderinfo.has_key("y resolution"): ifd[Y_RESOLUTION] = _cvt_res(im.encoderinfo["y resolution"]) if im.encoderinfo.has_key("resolution unit"): unit = im.encoderinfo["resolution unit"] if unit == "inch": ifd[RESOLUTION_UNIT] = 2 elif unit == "cm" or unit == "centimeter": ifd[RESOLUTION_UNIT] = 3 else: ifd[RESOLUTION_UNIT] = 1 if im.encoderinfo.has_key("software"): ifd[SOFTWARE] = im.encoderinfo["software"] if im.encoderinfo.has_key("date time"): ifd[DATE_TIME] = im.encoderinfo["date time"] if im.encoderinfo.has_key("artist"): ifd[ARTIST] = im.encoderinfo["artist"] if im.encoderinfo.has_key("copyright"): ifd[COPYRIGHT] = im.encoderinfo["copyright"] dpi = im.encoderinfo.get("dpi") if dpi: ifd[RESOLUTION_UNIT] = 2 ifd[X_RESOLUTION] = _cvt_res(dpi[0]) ifd[Y_RESOLUTION] = _cvt_res(dpi[1]) if bits != (1,): ifd[BITSPERSAMPLE] = bits if len(bits) != 1: ifd[SAMPLESPERPIXEL] = len(bits) if extra is not None: ifd[EXTRASAMPLES] = extra if format != 1: ifd[SAMPLEFORMAT] = format ifd[PHOTOMETRIC_INTERPRETATION] = photo if im.mode == "P": lut = im.im.getpalette("RGB", "RGB;L") ifd[COLORMAP] = tuple(map(lambda v: ord(v) * 256, lut)) # data orientation stride = len(bits) * ((im.size[0]*bits[0]+7)/8) ifd[ROWSPERSTRIP] = im.size[1] ifd[STRIPBYTECOUNTS] = stride * im.size[1] ifd[STRIPOFFSETS] = 0 # this is adjusted by IFD writer ifd[COMPRESSION] = 1 # no compression offset = ifd.save(fp) ImageFile._save(im, fp, [ ("raw", (0,0)+im.size, offset, (rawmode, stride, 1)) ]) # -- helper for multi-page save -- if im.encoderinfo.has_key("_debug_multipage"): #just to access o32 and o16 (using correct byte order) im._debug_multipage = ifd # # -------------------------------------------------------------------- # Register Image.register_open("TIFF", TiffImageFile, _accept) Image.register_save("TIFF", _save) Image.register_extension("TIFF", ".tif") Image.register_extension("TIFF", ".tiff") Image.register_mime("TIFF", "image/tiff")<|fim▁end|>
def load_undefined(self, data): # Untyped data return data load_dispatch[7] = (1, load_undefined)
<|file_name|>WebSocketHandshake.cpp<|end_file_name|><|fim▁begin|>/* * Copyright (C) 2011 Google Inc. All rights reserved. * Copyright (C) Research In Motion Limited 2011. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "config.h" #if ENABLE(WEB_SOCKETS) #include "WebSocketHandshake.h" #include "Base64.h" #include "Cookie.h" #include "CookieJar.h" #include "Document.h" #include "HTTPHeaderMap.h" #include "KURL.h" #include "Logging.h" #include "ScriptCallStack.h" #include "ScriptExecutionContext.h" #include "SecurityOrigin.h" #include <wtf/CryptographicallyRandomNumber.h> #include <wtf/MD5.h> #include <wtf/SHA1.h> #include <wtf/StdLibExtras.h> #include <wtf/StringExtras.h> #include <wtf/Vector.h> #include <wtf/text/CString.h> #include <wtf/text/StringBuilder.h> #include <wtf/text/WTFString.h> #include <wtf/unicode/CharacterNames.h> namespace WebCore { static const char randomCharacterInSecWebSocketKey[] = "!\"#$%&'()*+,-./:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~"; static String resourceName(const KURL& url) { String name = url.path(); if (name.isEmpty()) name = "/"; if (!url.query().isNull()) name += "?" + url.query(); ASSERT(!name.isEmpty()); ASSERT(!name.contains(' ')); return name; } static String hostName(const KURL& url, bool secure) { ASSERT(url.protocolIs("wss") == secure); StringBuilder builder; builder.append(url.host().lower()); if (url.port() && ((!secure && url.port() != 80) || (secure && url.port() != 443))) { builder.append(':'); builder.append(String::number(url.port())); } return builder.toString(); } static const size_t maxConsoleMessageSize = 128; static String trimConsoleMessage(const char* p, size_t len) { String s = String(p, std::min<size_t>(len, maxConsoleMessageSize)); if (len > maxConsoleMessageSize) s.append(horizontalEllipsis); return s; } static uint32_t randomNumberLessThan(uint32_t n) { if (!n) return 0; if (n == std::numeric_limits<uint32_t>::max()) return cryptographicallyRandomNumber(); uint32_t max = std::numeric_limits<uint32_t>::max() - (std::numeric_limits<uint32_t>::max() % n); ASSERT(!(max % n)); uint32_t v; do { v = cryptographicallyRandomNumber(); } while (v >= max); return v % n; } static void 
generateHixie76SecWebSocketKey(uint32_t& number, String& key) { uint32_t space = randomNumberLessThan(12) + 1; uint32_t max = 4294967295U / space; number = randomNumberLessThan(max); uint32_t product = number * space; String s = String::number(product); int n = randomNumberLessThan(12) + 1; DEFINE_STATIC_LOCAL(String, randomChars, (randomCharacterInSecWebSocketKey)); for (int i = 0; i < n; i++) { int pos = randomNumberLessThan(s.length() + 1); int chpos = randomNumberLessThan(randomChars.length()); s.insert(randomChars.substring(chpos, 1), pos); } DEFINE_STATIC_LOCAL(String, spaceChar, (" ")); for (uint32_t i = 0; i < space; i++) { int pos = randomNumberLessThan(s.length() - 1) + 1; s.insert(spaceChar, pos); } ASSERT(s[0] != ' '); ASSERT(s[s.length() - 1] != ' '); key = s; } static void generateHixie76Key3(unsigned char key3[8]) { cryptographicallyRandomValues(key3, 8); } static void setChallengeNumber(unsigned char* buf, uint32_t number) { unsigned char* p = buf + 3; for (int i = 0; i < 4; i++) { *p = number & 0xFF; --p; number >>= 8; } } static void generateHixie76ExpectedChallengeResponse(uint32_t number1, uint32_t number2, unsigned char key3[8], unsigned char expectedChallenge[16]) { unsigned char challenge[16]; setChallengeNumber(&challenge[0], number1); setChallengeNumber(&challenge[4], number2); memcpy(&challenge[8], key3, 8); MD5 md5; md5.addBytes(challenge, sizeof(challenge)); Vector<uint8_t, 16> digest; md5.checksum(digest); memcpy(expectedChallenge, digest.data(), 16); } static String generateSecWebSocketKey() { static const size_t nonceSize = 16; unsigned char key[nonceSize]; cryptographicallyRandomValues(key, nonceSize); return base64Encode(reinterpret_cast<char*>(key), nonceSize); } static String getExpectedWebSocketAccept(const String& secWebSocketKey) { static const char* const webSocketKeyGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"; static const size_t sha1HashSize = 20; // FIXME: This should be defined in SHA1.h. 
SHA1 sha1; CString keyData = secWebSocketKey.ascii(); sha1.addBytes(reinterpret_cast<const uint8_t*>(keyData.data()), keyData.length()); sha1.addBytes(reinterpret_cast<const uint8_t*>(webSocketKeyGUID), strlen(webSocketKeyGUID)); Vector<uint8_t, sha1HashSize> hash; sha1.computeHash(hash); return base64Encode(reinterpret_cast<const char*>(hash.data()), sha1HashSize); } WebSocketHandshake::WebSocketHandshake(const KURL& url, const String& protocol, ScriptExecutionContext* context, bool useHixie76Protocol) : m_url(url) , m_clientProtocol(protocol) , m_secure(m_url.protocolIs("wss")) , m_context(context) , m_useHixie76Protocol(useHixie76Protocol) , m_mode(Incomplete) { if (m_useHixie76Protocol) { uint32_t number1; uint32_t number2; generateHixie76SecWebSocketKey(number1, m_hixie76SecWebSocketKey1); generateHixie76SecWebSocketKey(number2, m_hixie76SecWebSocketKey2); generateHixie76Key3(m_hixie76Key3); generateHixie76ExpectedChallengeResponse(number1, number2, m_hixie76Key3, m_hixie76ExpectedChallengeResponse); } else { m_secWebSocketKey = generateSecWebSocketKey(); m_expectedAccept = getExpectedWebSocketAccept(m_secWebSocketKey); } } WebSocketHandshake::~WebSocketHandshake() { } const KURL& WebSocketHandshake::url() const { return m_url; } void WebSocketHandshake::setURL(const KURL& url) { m_url = url.copy(); } const String WebSocketHandshake::host() const { return m_url.host().lower(); } const String& WebSocketHandshake::clientProtocol() const { return m_clientProtocol; } void WebSocketHandshake::setClientProtocol(const String& protocol) { m_clientProtocol = protocol; } bool WebSocketHandshake::secure() const { return m_secure; } String WebSocketHandshake::clientOrigin() const { return m_context->securityOrigin()->toString(); } String WebSocketHandshake::clientLocation() const { StringBuilder builder; builder.append(m_secure ? 
"wss" : "ws"); builder.append("://"); builder.append(hostName(m_url, m_secure)); builder.append(resourceName(m_url)); return builder.toString(); } CString WebSocketHandshake::clientHandshakeMessage() const { // Keep the following consistent with clientHandshakeRequest(). StringBuilder builder; builder.append("GET "); builder.append(resourceName(m_url)); builder.append(" HTTP/1.1\r\n"); Vector<String> fields; if (m_useHixie76Protocol) fields.append("Upgrade: WebSocket"); else fields.append("Upgrade: websocket"); fields.append("Connection: Upgrade"); fields.append("Host: " + hostName(m_url, m_secure)); if (m_useHixie76Protocol) fields.append("Origin: " + clientOrigin()); else fields.append("Sec-WebSocket-Origin: " + clientOrigin()); if (!m_clientProtocol.isEmpty()) fields.append("Sec-WebSocket-Protocol: " + m_clientProtocol); KURL url = httpURLForAuthenticationAndCookies(); if (m_context->isDocument()) { Document* document = static_cast<Document*>(m_context); String cookie = cookieRequestHeaderFieldValue(document, url); if (!cookie.isEmpty()) fields.append("Cookie: " + cookie); // Set "Cookie2: <cookie>" if cookies 2 exists for url? } if (m_useHixie76Protocol) { fields.append("Sec-WebSocket-Key1: " + m_hixie76SecWebSocketKey1); fields.append("Sec-WebSocket-Key2: " + m_hixie76SecWebSocketKey2); } else { fields.append("Sec-WebSocket-Key: " + m_secWebSocketKey); fields.append("Sec-WebSocket-Version: 8"); } // Fields in the handshake are sent by the client in a random order; the // order is not meaningful. Thus, it's ok to send the order we constructed // the fields. for (size_t i = 0; i < fields.size(); i++) { builder.append(fields[i]); builder.append("\r\n"); } builder.append("\r\n"); CString handshakeHeader = builder.toString().utf8(); // Hybi-10 handshake is complete at this point. if (!m_useHixie76Protocol) return handshakeHeader; // Hixie-76 protocol requires sending eight-byte data (so-called "key3") after the request header fields. 
char* characterBuffer = 0; CString msg = CString::newUninitialized(handshakeHeader.length() + sizeof(m_hixie76Key3), characterBuffer); memcpy(characterBuffer, handshakeHeader.data(), handshakeHeader.length()); memcpy(characterBuffer + handshakeHeader.length(), m_hixie76Key3, sizeof(m_hixie76Key3)); return msg; } WebSocketHandshakeRequest WebSocketHandshake::clientHandshakeRequest() const { // Keep the following consistent with clientHandshakeMessage(). // FIXME: do we need to store m_secWebSocketKey1, m_secWebSocketKey2 and // m_key3 in WebSocketHandshakeRequest? WebSocketHandshakeRequest request("GET", m_url); if (m_useHixie76Protocol) request.addHeaderField("Upgrade", "WebSocket"); else request.addHeaderField("Upgrade", "websocket"); request.addHeaderField("Connection", "Upgrade"); request.addHeaderField("Host", hostName(m_url, m_secure)); if (m_useHixie76Protocol) request.addHeaderField("Origin", clientOrigin()); else request.addHeaderField("Sec-WebSocket-Origin", clientOrigin()); if (!m_clientProtocol.isEmpty()) request.addHeaderField("Sec-WebSocket-Protocol:", m_clientProtocol); KURL url = httpURLForAuthenticationAndCookies(); if (m_context->isDocument()) { Document* document = static_cast<Document*>(m_context); String cookie = cookieRequestHeaderFieldValue(document, url); if (!cookie.isEmpty()) request.addHeaderField("Cookie", cookie); // Set "Cookie2: <cookie>" if cookies 2 exists for url? 
} if (m_useHixie76Protocol) { request.addHeaderField("Sec-WebSocket-Key1", m_hixie76SecWebSocketKey1); request.addHeaderField("Sec-WebSocket-Key2", m_hixie76SecWebSocketKey2); request.setKey3(m_hixie76Key3); } else { request.addHeaderField("Sec-WebSocket-Key", m_secWebSocketKey); request.addHeaderField("Sec-WebSocket-Version", "8"); } return request; } void WebSocketHandshake::reset() { m_mode = Incomplete; } void WebSocketHandshake::clearScriptExecutionContext() { m_context = 0; } int WebSocketHandshake::readServerHandshake(const char* header, size_t len) { m_mode = Incomplete; int statusCode; String statusText; int lineLength = readStatusLine(header, len, statusCode, statusText); if (lineLength == -1) return -1; if (statusCode == -1) { m_mode = Failed; // m_failureReason is set inside readStatusLine(). return len; } LOG(Network, "response code: %d", statusCode); m_response.setStatusCode(statusCode); m_response.setStatusText(statusText); if (statusCode != 101) { m_mode = Failed; m_failureReason = "Unexpected response code: " + String::number(statusCode); return len; } m_mode = Normal; if (!strnstr(header, "\r\n\r\n", len)) { // Just hasn't been received fully yet. m_mode = Incomplete; return -1; } const char* p = readHTTPHeaders(header + lineLength, header + len); if (!p) { LOG(Network, "readHTTPHeaders failed"); m_mode = Failed; // m_failureReason is set inside readHTTPHeaders(). return len; } if (!checkResponseHeaders()) { LOG(Network, "header process failed"); m_mode = Failed; return p - header; } if (!m_useHixie76Protocol) { // Hybi-10 handshake is complete at this point. m_mode = Connected; return p - header; } // In hixie-76 protocol, server's handshake contains sixteen-byte data (called "challenge response") // after the header fields. if (len < static_cast<size_t>(p - header + sizeof(m_hixie76ExpectedChallengeResponse))) { // Just hasn't been received /expected/ yet. 
m_mode = Incomplete; return -1; } m_response.setChallengeResponse(static_cast<const unsigned char*>(static_cast<const void*>(p))); if (memcmp(p, m_hixie76ExpectedChallengeResponse, sizeof(m_hixie76ExpectedChallengeResponse))) { m_mode = Failed; return (p - header) + sizeof(m_hixie76ExpectedChallengeResponse); } m_mode = Connected; return (p - header) + sizeof(m_hixie76ExpectedChallengeResponse); } WebSocketHandshake::Mode WebSocketHandshake::mode() const { return m_mode; } String WebSocketHandshake::failureReason() const { return m_failureReason; } String WebSocketHandshake::serverWebSocketOrigin() const { return m_response.headerFields().get("sec-websocket-origin"); } String WebSocketHandshake::serverWebSocketLocation() const { return m_response.headerFields().get("sec-websocket-location"); } String WebSocketHandshake::serverWebSocketProtocol() const { return m_response.headerFields().get("sec-websocket-protocol"); } String WebSocketHandshake::serverSetCookie() const { return m_response.headerFields().get("set-cookie"); } String WebSocketHandshake::serverSetCookie2() const { return m_response.headerFields().get("set-cookie2"); } String WebSocketHandshake::serverUpgrade() const { return m_response.headerFields().get("upgrade"); } String WebSocketHandshake::serverConnection() const { return m_response.headerFields().get("connection"); } String WebSocketHandshake::serverWebSocketAccept() const { return m_response.headerFields().get("sec-websocket-accept"); } String WebSocketHandshake::serverWebSocketExtensions() const { return m_response.headerFields().get("sec-websocket-extensions"); } const WebSocketHandshakeResponse& WebSocketHandshake::serverHandshakeResponse() const { return m_response; } KURL WebSocketHandshake::httpURLForAuthenticationAndCookies() const { KURL url = m_url.copy(); bool couldSetProtocol = url.setProtocol(m_secure ? 
"https" : "http"); ASSERT_UNUSED(couldSetProtocol, couldSetProtocol); return url; } // Returns the header length (including "\r\n"), or -1 if we have not received enough data yet. // If the line is malformed or the status code is not a 3-digit number, // statusCode and statusText will be set to -1 and a null string, respectively. int WebSocketHandshake::readStatusLine(const char* header, size_t headerLength, int& statusCode, String& statusText) { // Arbitrary size limit to prevent the server from sending an unbounded // amount of data with no newlines and forcing us to buffer it all. static const int maximumLength = 1024; statusCode = -1; statusText = String(); const char* space1 = 0; const char* space2 = 0; const char* p; size_t consumedLength; for (p = header, consumedLength = 0; consumedLength < headerLength; p++, consumedLength++) { if (*p == ' ') { if (!space1) space1 = p; else if (!space2) space2 = p; } else if (*p == '\0') { // The caller isn't prepared to deal with null bytes in status // line. WebSockets specification doesn't prohibit this, but HTTP // does, so we'll just treat this as an error. m_failureReason = "Status line contains embedded null"; return p + 1 - header; } else if (*p == '\n') break; } if (consumedLength == headerLength) return -1; // We have not received '\n' yet. const char* end = p + 1; int lineLength = end - header; if (lineLength > maximumLength) { m_failureReason = "Status line is too long"; return maximumLength; } // The line must end with "\r\n". if (lineLength < 2 || *(end - 2) != '\r') { m_failureReason = "Status line does not end with CRLF"; return lineLength; } if (!space1 || !space2) { m_failureReason = "No response code found: " + trimConsoleMessage(header, lineLength - 2); return lineLength; } String statusCodeString(space1 + 1, space2 - space1 - 1); if (statusCodeString.length() != 3) // Status code must consist of three digits. 
return lineLength; for (int i = 0; i < 3; ++i) if (statusCodeString[i] < '0' || statusCodeString[i] > '9') { m_failureReason = "Invalid status code: " + statusCodeString; return lineLength; } bool ok = false; statusCode = statusCodeString.toInt(&ok); ASSERT(ok); statusText = String(space2 + 1, end - space2 - 3); // Exclude "\r\n". return lineLength; } const char* WebSocketHandshake::readHTTPHeaders(const char* start, const char* end) { m_response.clearHeaderFields(); Vector<char> name; Vector<char> value; for (const char* p = start; p < end; p++) { name.clear(); value.clear(); for (; p < end; p++) { switch (*p) { case '\r': if (name.isEmpty()) { if (p + 1 < end && *(p + 1) == '\n') return p + 2; m_failureReason = "CR doesn't follow LF at " + trimConsoleMessage(p, end - p); return 0; } m_failureReason = "Unexpected CR in name at " + trimConsoleMessage(name.data(), name.size()); return 0; case '\n': m_failureReason = "Unexpected LF in name at " + trimConsoleMessage(name.data(), name.size()); return 0; case ':': break; default: name.append(*p); continue; } if (*p == ':') { ++p; break; } } for (; p < end && *p == 0x20; p++) { } for (; p < end; p++) { switch (*p) { case '\r': break; case '\n': m_failureReason = "Unexpected LF in value at " + trimConsoleMessage(value.data(), value.size()); return 0; default: value.append(*p); } if (*p == '\r') { ++p; break; } } if (p >= end || *p != '\n') { m_failureReason = "CR doesn't follow LF after value at " + trimConsoleMessage(p, end - p); return 0; } AtomicString nameStr = AtomicString::fromUTF8(name.data(), name.size()); String valueStr = String::fromUTF8(value.data(), value.size()); if (nameStr.isNull()) { m_failureReason = "Invalid UTF-8 sequence in header name"; return 0; } if (valueStr.isNull()) { m_failureReason = "Invalid UTF-8 sequence in header value"; return 0; } LOG(Network, "name=%s value=%s", nameStr.string().utf8().data(), valueStr.utf8().data()); m_response.addHeaderField(nameStr, valueStr); } ASSERT_NOT_REACHED(); 
return 0; } bool WebSocketHandshake::checkResponseHeaders() { const String& serverWebSocketLocation = this->serverWebSocketLocation(); const String& serverWebSocketOrigin = this->serverWebSocketOrigin(); const String& serverWebSocketProtocol = this->serverWebSocketProtocol(); const String& serverUpgrade = this->serverUpgrade(); const String& serverConnection = this->serverConnection(); const String& serverWebSocketAccept = this->serverWebSocketAccept(); const String& serverWebSocketExtensions = this->serverWebSocketExtensions(); if (serverUpgrade.isNull()) { m_failureReason = "Error during WebSocket handshake: 'Upgrade' header is missing"; return false; } if (serverConnection.isNull()) { m_failureReason = "Error during WebSocket handshake: 'Connection' header is missing"; return false; } if (m_useHixie76Protocol) { if (serverWebSocketOrigin.isNull()) { m_failureReason = "Error during WebSocket handshake: 'Sec-WebSocket-Origin' header is missing"; return false; } if (serverWebSocketLocation.isNull()) { m_failureReason = "Error during WebSocket handshake: 'Sec-WebSocket-Location' header is missing"; return false; } } else { if (serverWebSocketAccept.isNull()) { m_failureReason = "Error during WebSocket handshake: 'Sec-WebSocket-Accept' header is missing"; return false; } } if (!equalIgnoringCase(serverUpgrade, "websocket")) { m_failureReason = "Error during WebSocket handshake: 'Upgrade' header value is not 'WebSocket'"; return false; } if (!equalIgnoringCase(serverConnection, "upgrade")) { m_failureReason = "Error during WebSocket handshake: 'Connection' header value is not 'Upgrade'"; return false; } if (m_useHixie76Protocol) { if (clientOrigin() != serverWebSocketOrigin) { m_failureReason = "Error during WebSocket handshake: origin mismatch: " + clientOrigin() + " != " + serverWebSocketOrigin; return false; } if (clientLocation() != serverWebSocketLocation) { m_failureReason = "Error during WebSocket handshake: location mismatch: " + clientLocation() + " != " + 
serverWebSocketLocation; return false; } if (!m_clientProtocol.isEmpty() && m_clientProtocol != serverWebSocketProtocol) { m_failureReason = "Error during WebSocket handshake: protocol mismatch: " + m_clientProtocol + " != " + serverWebSocketProtocol;<|fim▁hole|> if (serverWebSocketAccept != m_expectedAccept) { m_failureReason = "Error during WebSocket handshake: Sec-WebSocket-Accept mismatch"; return false; } if (!serverWebSocketExtensions.isNull()) { // WebSocket protocol extensions are not supported yet. // We do not send Sec-WebSocket-Extensions header in our request, thus // servers should not return this header, either. m_failureReason = "Error during WebSocket handshake: Sec-WebSocket-Extensions header is invalid"; return false; } } return true; } } // namespace WebCore #endif // ENABLE(WEB_SOCKETS)<|fim▁end|>
return false; } } else {
<|file_name|>dist.rs<|end_file_name|><|fim▁begin|>//! This module implements middleware to serve the compiled emberjs //! frontend use std::error::Error; use conduit::{Request, Response, Handler}; use conduit_static::Static; use conduit_middleware::AroundMiddleware; use util::RequestProxy; // Can't derive debug because of Handler and Static. #[allow(missing_debug_implementations)] pub struct Middleware { handler: Option<Box<Handler>>, dist: Static, } <|fim▁hole|> dist: Static::new("dist"), } } } impl AroundMiddleware for Middleware { fn with_handler(&mut self, handler: Box<Handler>) { self.handler = Some(handler); } } impl Handler for Middleware { fn call(&self, req: &mut Request) -> Result<Response, Box<Error + Send>> { // First, attempt to serve a static file. If we're missing a static // file, then keep going. match self.dist.call(req) { Ok(ref resp) if resp.status.0 == 404 => {} ret => return ret, } // Second, if we're requesting html, then we've only got one page so // serve up that page. Otherwise proxy on to the rest of the app. let wants_html = req.headers() .find("Accept") .map(|accept| accept.iter().any(|s| s.contains("html"))) .unwrap_or(false); // If the route starts with /api, just assume they want the API // response. Someone is either debugging or trying to download a crate. let is_api_path = req.path().starts_with("/api"); if wants_html && !is_api_path { self.dist.call(&mut RequestProxy { other: req, path: Some("/index.html"), method: None, }) } else { self.handler.as_ref().unwrap().call(req) } } }<|fim▁end|>
impl Default for Middleware { fn default() -> Middleware { Middleware { handler: None,
<|file_name|>client.py<|end_file_name|><|fim▁begin|># Copyright (c) 2011 Justin Santa Barbara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import urllib from oslo_log import log as logging from oslo_serialization import jsonutils import requests import six LOG = logging.getLogger(__name__) <|fim▁hole|> This provides a decoded version of the Requests response which include a json decoded body, far more convenient for testing that returned structures are correct, or using parts of returned structures in tests. This class is a simple wrapper around dictionaries for API responses in tests. It includes extra attributes so that they can be inspected in addition to the attributes. All json responses from Nova APIs are dictionary compatible, or blank, so other possible base classes are not needed. """ status = 200 """The HTTP status code as an int""" content = "" """The Raw HTTP response body as a string""" body = {} """The decoded json body as a dictionary""" headers = {} """Response headers as a dictionary""" def __init__(self, response): """Construct an API response from a Requests response :param response: a ``requests`` library response """ super(APIResponse, self).__init__() self.status = response.status_code self.content = response.content if self.content: self.body = jsonutils.loads(self.content) self.headers = response.headers def __str__(self): # because __str__ falls back to __repr__ we can still use repr # on self but add in the other attributes. 
return "<Response body:%r, status_code:%s>" % (self.body, self.status) class OpenStackApiException(Exception): def __init__(self, message=None, response=None): self.response = response if not message: message = 'Unspecified error' if response: _status = response.status_code _body = response.content message = ('%(message)s\nStatus Code: %(_status)s\n' 'Body: %(_body)s' % {'message': message, '_status': _status, '_body': _body}) super(OpenStackApiException, self).__init__(message) class OpenStackApiAuthenticationException(OpenStackApiException): def __init__(self, response=None, message=None): if not message: message = "Authentication error" super(OpenStackApiAuthenticationException, self).__init__(message, response) class OpenStackApiAuthorizationException(OpenStackApiException): def __init__(self, response=None, message=None): if not message: message = "Authorization error" super(OpenStackApiAuthorizationException, self).__init__(message, response) class OpenStackApiNotFoundException(OpenStackApiException): def __init__(self, response=None, message=None): if not message: message = "Item not found" super(OpenStackApiNotFoundException, self).__init__(message, response) class TestOpenStackClient(object): """Simple OpenStack API Client. 
This is a really basic OpenStack API client that is under our control, so we can make changes / insert hooks for testing """ def __init__(self, auth_user, auth_key, auth_uri, project_id=None): super(TestOpenStackClient, self).__init__() self.auth_result = None self.auth_user = auth_user self.auth_key = auth_key self.auth_uri = auth_uri if project_id is None: self.project_id = "6f70656e737461636b20342065766572" else: self.project_id = project_id self.microversion = None def request(self, url, method='GET', body=None, headers=None): _headers = {'Content-Type': 'application/json'} _headers.update(headers or {}) response = requests.request(method, url, data=body, headers=_headers) return response def _authenticate(self): if self.auth_result: return self.auth_result auth_uri = self.auth_uri headers = {'X-Auth-User': self.auth_user, 'X-Auth-Key': self.auth_key, 'X-Auth-Project-Id': self.project_id} response = self.request(auth_uri, headers=headers) http_status = response.status_code LOG.debug("%(auth_uri)s => code %(http_status)s", {'auth_uri': auth_uri, 'http_status': http_status}) if http_status == 401: raise OpenStackApiAuthenticationException(response=response) self.auth_result = response.headers return self.auth_result def api_request(self, relative_uri, check_response_status=None, strip_version=False, **kwargs): auth_result = self._authenticate() # NOTE(justinsb): httplib 'helpfully' converts headers to lower case base_uri = auth_result['x-server-management-url'] if strip_version: # NOTE(vish): cut out version number and tenant_id base_uri = '/'.join(base_uri.split('/', 3)[:-1]) full_uri = '%s/%s' % (base_uri, relative_uri) headers = kwargs.setdefault('headers', {}) headers['X-Auth-Token'] = auth_result['x-auth-token'] if self.microversion: headers['X-OpenStack-Nova-API-Version'] = self.microversion response = self.request(full_uri, **kwargs) http_status = response.status_code LOG.debug("%(relative_uri)s => code %(http_status)s", {'relative_uri': relative_uri, 
'http_status': http_status}) if check_response_status: if http_status not in check_response_status: if http_status == 404: raise OpenStackApiNotFoundException(response=response) elif http_status == 401: raise OpenStackApiAuthorizationException(response=response) else: raise OpenStackApiException( message="Unexpected status code", response=response) return response def _decode_json(self, response): resp = APIResponse(status=response.status_code) if response.content: resp.body = jsonutils.loads(response.content) return resp def api_get(self, relative_uri, **kwargs): kwargs.setdefault('check_response_status', [200]) return APIResponse(self.api_request(relative_uri, **kwargs)) def api_post(self, relative_uri, body, **kwargs): kwargs['method'] = 'POST' if body: headers = kwargs.setdefault('headers', {}) headers['Content-Type'] = 'application/json' kwargs['body'] = jsonutils.dumps(body) kwargs.setdefault('check_response_status', [200, 202]) return APIResponse(self.api_request(relative_uri, **kwargs)) def api_put(self, relative_uri, body, **kwargs): kwargs['method'] = 'PUT' if body: headers = kwargs.setdefault('headers', {}) headers['Content-Type'] = 'application/json' kwargs['body'] = jsonutils.dumps(body) kwargs.setdefault('check_response_status', [200, 202, 204]) return APIResponse(self.api_request(relative_uri, **kwargs)) def api_delete(self, relative_uri, **kwargs): kwargs['method'] = 'DELETE' kwargs.setdefault('check_response_status', [200, 202, 204]) return APIResponse(self.api_request(relative_uri, **kwargs)) ##################################### # # Convenience methods # # The following are a set of convenience methods to get well known # resources, they can be helpful in setting up resources in # tests. All of these convenience methods throw exceptions if they # get a non 20x status code, so will appropriately abort tests if # they fail. # # They all return the most relevant part of their response body as # decoded data structure. 
# ##################################### def get_server(self, server_id): return self.api_get('/servers/%s' % server_id).body['server'] def get_servers(self, detail=True, search_opts=None): rel_url = '/servers/detail' if detail else '/servers' if search_opts is not None: qparams = {} for opt, val in six.iteritems(search_opts): qparams[opt] = val if qparams: query_string = "?%s" % urllib.urlencode(qparams) rel_url += query_string return self.api_get(rel_url).body['servers'] def post_server(self, server): response = self.api_post('/servers', server).body if 'reservation_id' in response: return response else: return response['server'] def put_server(self, server_id, server): return self.api_put('/servers/%s' % server_id, server).body def post_server_action(self, server_id, data): return self.api_post('/servers/%s/action' % server_id, data).body def delete_server(self, server_id): return self.api_delete('/servers/%s' % server_id) def get_image(self, image_id): return self.api_get('/images/%s' % image_id).body['image'] def get_images(self, detail=True): rel_url = '/images/detail' if detail else '/images' return self.api_get(rel_url).body['images'] def post_image(self, image): return self.api_post('/images', image).body['image'] def delete_image(self, image_id): return self.api_delete('/images/%s' % image_id) def get_flavor(self, flavor_id): return self.api_get('/flavors/%s' % flavor_id).body['flavor'] def get_flavors(self, detail=True): rel_url = '/flavors/detail' if detail else '/flavors' return self.api_get(rel_url).body['flavors'] def post_flavor(self, flavor): return self.api_post('/flavors', flavor).body['flavor'] def delete_flavor(self, flavor_id): return self.api_delete('/flavors/%s' % flavor_id) def post_extra_spec(self, flavor_id, spec): return self.api_post('/flavors/%s/os-extra_specs' % flavor_id, spec) def get_volume(self, volume_id): return self.api_get('/os-volumes/%s' % volume_id).body['volume'] def get_volumes(self, detail=True): rel_url = 
'/os-volumes/detail' if detail else '/os-volumes' return self.api_get(rel_url).body['volumes'] def post_volume(self, volume): return self.api_post('/os-volumes', volume).body['volume'] def delete_volume(self, volume_id): return self.api_delete('/os-volumes/%s' % volume_id) def get_snapshot(self, snap_id): return self.api_get('/os-snapshots/%s' % snap_id).body['snapshot'] def get_snapshots(self, detail=True): rel_url = '/os-snapshots/detail' if detail else '/os-snapshots' return self.api_get(rel_url).body['snapshots'] def post_snapshot(self, snapshot): return self.api_post('/os-snapshots', snapshot).body['snapshot'] def delete_snapshot(self, snap_id): return self.api_delete('/os-snapshots/%s' % snap_id) def get_server_volume(self, server_id, attachment_id): return self.api_get('/servers/%s/os-volume_attachments/%s' % (server_id, attachment_id) ).body['volumeAttachment'] def get_server_volumes(self, server_id): return self.api_get('/servers/%s/os-volume_attachments' % (server_id)).body['volumeAttachments'] def post_server_volume(self, server_id, volume_attachment): return self.api_post('/servers/%s/os-volume_attachments' % (server_id), volume_attachment ).body['volumeAttachment'] def delete_server_volume(self, server_id, attachment_id): return self.api_delete('/servers/%s/os-volume_attachments/%s' % (server_id, attachment_id)) def post_server_metadata(self, server_id, metadata): post_body = {'metadata': {}} post_body['metadata'].update(metadata) return self.api_post('/servers/%s/metadata' % server_id, post_body).body['metadata'] def get_server_groups(self, all_projects=None): if all_projects: return self.api_get( '/os-server-groups?all_projects').body['server_groups'] else: return self.api_get('/os-server-groups').body['server_groups'] def get_server_group(self, group_id): return self.api_get('/os-server-groups/%s' % group_id).body['server_group'] def post_server_groups(self, group): response = self.api_post('/os-server-groups', {"server_group": group}) return 
response.body['server_group'] def delete_server_group(self, group_id): self.api_delete('/os-server-groups/%s' % group_id) def get_instance_actions(self, server_id): return self.api_get('/servers/%s/os-instance-actions' % (server_id)).body['instanceActions']<|fim▁end|>
class APIResponse(object): """Decoded API Response
<|file_name|>sqoop_properties.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import errno import logging import os from sqoop.conf import SQOOP_CONF_DIR LOG = logging.getLogger(__name__) _PROPERTIES_DICT = None _CONF_SQOOP_AUTHENTICATION_TYPE = 'org.apache.sqoop.security.authentication.type' def reset(): global _PROPERTIES_DICT _PROPERTIES_DICT = None def get_props(): if _PROPERTIES_DICT is None: _parse_properties() return _PROPERTIES_DICT def has_sqoop_has_security(): return get_props().get(_CONF_SQOOP_AUTHENTICATION_TYPE, 'SIMPLE').upper() == 'KERBEROS' <|fim▁hole|>def _parse_properties(): global _PROPERTIES_DICT properties_file = os.path.join(SQOOP_CONF_DIR.get(), 'sqoop.properties') _PROPERTIES_DICT = _parse_site(properties_file) def _parse_site(site_path): try: with open(site_path, 'r') as f: data = f.read() except IOError as err: if err.errno != errno.ENOENT: LOG.error('Cannot read from "%s": %s' % (site_path, err)) return data = "" return dict([line.split('=', 1) for line in data.split('\n') if '=' in line and not line.startswith('#')])<|fim▁end|>
<|file_name|>animes-stream24_tv.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from resources.lib.gui.gui import cGui from resources.lib.gui.guiElement import cGuiElement from resources.lib.handler.requestHandler import cRequestHandler from resources.lib.parser import cParser from resources.lib.handler.ParameterHandler import ParameterHandler from resources.lib import logger from resources.lib.config import cConfig import re, time, xbmcgui SITE_IDENTIFIER = 'animes-stream24_tv' SITE_NAME = 'Animes-Stream24' SITE_ICON = 'as24.png' URL_MAIN = 'http://as.animes-stream24.tv/' URL_MAIN_2 = 'http://as.anime-stream24.co/' #BACKUP URL def load(): oGui = cGui() params = ParameterHandler() logger.info("Load %s" % SITE_NAME) if showAdult(): params.setParam('entryMode', "a_z") oGui.addFolder(cGuiElement('A BIS Z', SITE_IDENTIFIER, 'showMovies'), params) params.setParam('entryMode', "top_animes") oGui.addFolder(cGuiElement('Top', SITE_IDENTIFIER, 'showMovies'), params) params.setParam('entryMode', "new") oGui.addFolder(cGuiElement('Neuste Animes', SITE_IDENTIFIER, 'showMovies'), params) params.setParam('entryMode', "a_z") oGui.addFolder(cGuiElement('Suche', SITE_IDENTIFIER, 'showSearch'), params) else: oGui.addFolder(cGuiElement('Um Inhalte sehen zu können, muss der Adult Content aktiviert werden. \n(Klicke hier, um diese zu öffnen)', SITE_IDENTIFIER, 'getConf'), params) oGui.setEndOfDirectory() def showMovies(sURL = False, sGui = False, sSearchText = ""): oGui = sGui if sGui else cGui() if not sURL: sURL = URL_MAIN params = ParameterHandler() eMode = "" if not eMode: eMode = params.getValue('entryMode') else: eMode = "ERROR" if "top_animes" in eMode: pattern = 'class="separator".*?<a href="([^"]+)".*?' #link pattern += '<img src="([^"]+)".*?' 
#img pattern += '([^><]+)</a>' #titel elif "a_z" in eMode: pattern = "<option value='([^']+)'>([^><]+)</option>" #link, titel elif "new" in eMode: sURL = sURL + "search?updated-max=" + time.strftime("%Y-%m-%d") + "T08:48:00%2B01:00&max-results=" pattern = False aResult = False else: if not sGui: oGui.showInfo('xStream', eMode) return if pattern: oRequestHandler = cRequestHandler(sURL) sHtmlContent = oRequestHandler.request() oParser = cParser() aResult = oParser.parse(sHtmlContent, pattern) if not aResult[0]: if not sGui: oGui.showInfo('xStream', 'Es wurde kein Eintrag gefunden') return total = len(aResult[1]) qual = "1080" if "top_animes" in eMode: for link, img, title in aResult[1]: guiElement = cGuiElement(title, SITE_IDENTIFIER, 'getEpisodes') guiElement.setThumbnail(img) #guiElement.setDescription(plot.decode('iso-8859-1')) guiElement.setMediaType('movie') params.setParam('eUrl',link) oGui.addFolder(guiElement, params, bIsFolder = True, iTotal = total) elif "new" in eMode: ymd_date = time.strftime("%Y-%m-%d") params.setParam('eUrl',sURL + "11") oGui.addFolder(cGuiElement("Zeige letzte 11 Einträge (" + ymd_date +")", SITE_IDENTIFIER, 'getEpisodes'),params) params.setParam('eUrl',sURL + "22") oGui.addFolder(cGuiElement("Zeige letzte 22 Einträge (" + ymd_date +")", SITE_IDENTIFIER, 'getEpisodes'),params) params.setParam('eUrl',sURL + "44") oGui.addFolder(cGuiElement("Zeige letzte 44 Einträge (" + ymd_date +")", SITE_IDENTIFIER, 'getEpisodes'),params) elif "a_z" in eMode: #sPattern = params.getValue('search_on') sPattern = sSearchText; a = [] reg_ex = re.compile('.*' + sSearchText + '.*?', re.I) pattern = "class='post-title entry-title'><a href='([^']+)'>" #link pattern += "([^><]+).*?" 
#ep_Name pattern += '<img.*?src="([^"]+)".*?bung:.*?/>' #Img pattern += "(.*?)<br./>" #plot /Gen if sPattern: for link, title in aResult[1]: if re.search(reg_ex,title): guiElement = cGuiElement(title, SITE_IDENTIFIER, 'getEpisodes') sHtml = cRequestHandler(link).request() a = oParser.parse(sHtml, pattern) #xbmcgui.Dialog().ok("SHOW",str(a[1][1][3])) #.encode("utf-8")) guiElement.setThumbnail(a[1][1][2]) guiElement.setDescription(a[1][1][3]) params.setParam('eUrl',link) oGui.addFolder(guiElement, params, bIsFolder = True, iTotal = total) else: for link, title in aResult[1]: guiElement = cGuiElement(title, SITE_IDENTIFIER, 'getEpisodes') """ TODO: ERROR HANDLING OUT OF RANGE - LAEDT SONST EWIG FUER DEN REQUEST EVENTL AUFTEILEN ODER EINZELNE THREADS?? ---------------------------------------------------------------------- sHtml = cRequestHandler(link).request() a = oParser.parse(sHtml, pattern) guiElement.setThumbnail(a[1][1][2]) guiElement.setDescription(a[1][1][3].decode('iso-8859-1').encode('utf-8')) """ params.setParam('eUrl',link) oGui.addFolder(guiElement, params, bIsFolder = True, iTotal = total) oGui.setView('movies') oGui.setEndOfDirectory() def getEpisodes(): oGui = cGui() oParser = cParser() params = ParameterHandler() eUrl = ParameterHandler().getValue('eUrl') eUrl = eUrl.replace(" ", "%20"); eUrl = eUrl.replace("+", "%2B") #Decode(Leerzeichen, +) isMovie = True pattern = "class='post-title entry-title'><a href='([^']+)'>" #link pattern += "([^><]+).*?" 
#ep_Name pattern += '<img.*?src="([^"]+)".*?bung:.*?/>' #Img pattern += "(.*?)<br./>" #plot /Gen sHtmlContent = cRequestHandler(eUrl).request() aResult = oParser.parse(sHtmlContent, pattern) bResult = oParser.parse(sHtmlContent, "older-link'.*?href='([^']+)'") if not aResult[0]: oGui.showInfo('xStream', 'Es wurde kein Eintrag gefunden') return total = len(aResult[1]) for link, title, img, plot in aResult[1]: GuiElement = cGuiElement(title, SITE_IDENTIFIER, 'getHosters') GuiElement.setMediaType('movie' if isMovie else 'tvshow') GuiElement.setThumbnail(img) plot.replace('<b>', '') GuiElement.setDescription(plot)#.decode('iso-8859-1').encode('utf-8')) #GuiElement.setYear(year) params.setParam('siteUrl', link) params.setParam('sName', title) oGui.addFolder(GuiElement, params, False, total) if 'entry-title' in cRequestHandler(bResult[1][0]).request(): params.setParam('eUrl', bResult[1][0]) oGui.addFolder(cGuiElement("Weitere Episoden -->", SITE_IDENTIFIER, 'getEpisodes'),params) #logger.info('[[suhmser]] %s: ' % str(bResult[1][0])) oGui.setView('movies') oGui.setEndOfDirectory() def getHosters(): oParams = ParameterHandler() oGui = cGui() sUrl = oParams.getValue('siteUrl') sHtmlContent = cRequestHandler(sUrl).request() sPattern = '<iframe.*?(?:src|SRC)="([^"]+).*?(?:\<\/if|\<\/IF)' sPattern_bkp = '-[0-9]".?>.*?(?:src|SRC)="([^"]+)".*?' 
#sPattern_alone = '#fragment.*?src|SRC="//([^"]+)".*?>(?:' #s_url aResult = cParser().parse(sHtmlContent, sPattern) <|fim▁hole|> #test_link = "*.mp4" #hosters.append({'link': test_link, 'name': 'Testing_link', 'resolveable': True}) reg_ex = re.compile('(?://|\.)?(?:[a-zA-Z0-9]+\.)?([a-zA-Z0-9-.]{0,})\..*?\/.*?\/?', re.I) for sUrl in aResult[1]: sName = re.search(reg_ex, sUrl).group(1) if not sUrl.startswith('http'): if sUrl.startswith('//'): sUrl = 'http:%s' % sUrl else: sUrl = 'http://%s' % sUrl hosters.append({'link': sUrl, 'name': sName, 'resolveable': True}) if hosters: hosters.append('getHosterUrl') return hosters else: oGui.showInfo('xStream', 'Es wurde kein Eintrag gefunden') def getHosterUrl(sUrl=False): if not sUrl: sUrl = ParameterHandler().getValue('sUrl') if 'animes-stream24.net' in sUrl: sUrl = _as24_resolver(sUrl) res = True elif 'ani-stream.com' in sUrl: #DOT|net=off sUrl = _anistream_resolver(sUrl) res = True elif 'uploadkadeh.com' in sUrl: sUrl = 'http://uploadkadeh.com:182/d/' + _webtv_resolver(sUrl) + '/video.mp4' res = True elif sUrl in set(['web.tv','plublicvideohost.org']): #or bigfile.to sUrl = _webtv_resolver(sUrl) res = True else: res = False results = [] result = {} #logger.info('[[suhmser]] Url %s after:getHosterUrl(): ' % sUrl) result['streamUrl'] = sUrl result['resolved'] = res results.append(result) return results #play > [sUrl,[BOOL]] def _as24_resolver(url): oParams = ParameterHandler() sHtmlContent = cRequestHandler(url).request() #sUrl = re.search("\{file:'([^']+)'", sHtmlContent, re.I).group(1) #redi = re.search("(http://.*?/)", sUrl, re.I).group(1) #getHosturl http://[HOST.DMN]/ aResult = cParser().parse(sHtmlContent, '''\{file:.?(?:"|')([^'"]+)(?:"|').+''') redi = "http://as.animes-stream24.net/" # \.open\('(.+)'\)\; for sUrl in aResult[1]: if sUrl and redi: #sUrl = _redirectHoster(sUrl, sUrl, False) return sUrl else: return sUrl def _webtv_resolver(url): oParams = ParameterHandler() sHtmlContent = cRequestHandler(url).request() 
if 'web.tv' in url: aResult = cParser().parse(sHtmlContent, '"sources.*?src.."(.*?)"}]') if 'publicvideohost.org' in url: pattern = '(?:file|source)+?:.?(?:"|' pattern += "')(.*?.flv+)(?:" pattern += '"|' + "')" aResult = cParser().parse(sHtmlContent, pattern) #(?:file|source)+?:.?(?:"|')(.*?.[a-zA-Z0-9]{2,3}+)(?:"|') if 'uploadkadeh.com' in url: aResult = cParser().parse(sHtmlContent, 'player_code.*?video\|([^\|]+)') #else # TODO: check mit urlresolver? for sUrl in aResult[1]: if sUrl: return sUrl else: xbmcgui.Dialog().ok( "Fehler" , 'Error 666: ' + sUrl) def _anistream_resolver(o_url): oParams = ParameterHandler() sHtmlContent = cRequestHandler(o_url).request() match = re.findall("file\s*:\s*(?:'|\")(.+?)(?:\'|\")", sHtmlContent) if match: url = match[0] #content = requests.get(url, headers=headers).text.replace('\\','') if url: try: #r = requests.head(url[0], headers=headers) #if r.headers.get('location'): #url = [r.headers.get('location')] #logger.info('[[suhmser]] Url %s _anistream_Resolver(): ' % url) url = _redirectHoster(url) except: pass return url else: xbmc.executebuiltin('Notification(Info: Error: URL,)') def _redirectHoster(url, ref = False, cookie = False): if url: import urllib2 ua = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30" def req(url): request = urllib2.Request(url) request.add_header('User-Agent', ua) if ref: request.add_header('Referer', ref) return request request = req(url) response = urllib2.urlopen(request, timeout=30) #Bypass Timeout Issues #response=page.read();page.close() if cookie or 'Set-Cookie' in response.info(): request = req(URL_MAIN) res = urllib2.urlopen(request, timeout=12) cookie = res.info()['Set-Cookie']#Get Cookieinfo if cookie: request.add_header('Cookie',cookie) if url != response.geturl(): return response.geturl() else: return url def showSearch(): oGui = cGui() sSearchText = oGui.showKeyBoard() if not sSearchText: return 
# Filter Typ als Parameter (optional) sType = ParameterHandler().getValue('sType') if sType: sSearchText = sSearchText.strip() + "&type="+sType _search(False, sSearchText) oGui.setEndOfDirectory() def _search(oGui, sSearchText): if not sSearchText: return ParameterHandler().setParam('search_on', sSearchText) showMovies(False, oGui, sSearchText) def showAdult(): oConfig = cConfig() if oConfig.getSetting('showAdult')=='true': return True return False def getConf(): oGui = cGui() oGui.openSettings()<|fim▁end|>
if aResult[0]: hosters = []
<|file_name|>text.rs<|end_file_name|><|fim▁begin|>use crate::cx::*; #[derive(Clone)] pub enum Wrapping { Char, Word, Line, None, Ellipsis(f32) } #[derive(Clone, Copy)] pub struct TextStyle { pub font: Font, pub font_size: f32, pub brightness: f32, pub curve: f32, pub line_spacing: f32, pub top_drop: f32, pub height_factor: f32, } impl Default for TextStyle { fn default() -> Self { TextStyle { font: Font::default(), font_size: 8.0, brightness: 1.0, curve: 0.7, line_spacing: 1.4, top_drop: 1.1, height_factor: 1.3, } } } #[derive(Clone)] pub struct Text { pub class: ClassId, pub text_style: TextStyle, pub shader: Shader, pub color: Color, pub z: f32, pub wrapping: Wrapping, pub font_scale: f32, pub do_h_scroll: bool, pub do_v_scroll: bool, } impl Text { pub fn proto(cx: &mut Cx) -> Self { Self { class: ClassId::base(), text_style: TextStyle::default(), shader: cx.add_shader(Self::def_text_shader(), "TextAtlas"), do_h_scroll: true, do_v_scroll: true, z: 0.0, wrapping: Wrapping::Word, color: color("white"), font_scale: 1.0, /* font: cx.load_font_path("resources/Ubuntu-R.ttf"), font_size: 8.0, font_scale: 1.0, line_spacing: 1.4, top_drop: 1.1, height_factor: 1.3, curve: 0.7, brightness: 1.0, */ } } pub fn instance_font_tc() -> InstanceVec4 {uid!()} pub fn instance_color() -> InstanceColor {uid!()} pub fn instance_x() -> InstanceFloat {uid!()} pub fn instance_y() -> InstanceFloat {uid!()} pub fn instance_w() -> InstanceFloat {uid!()} pub fn instance_h() -> InstanceFloat {uid!()} pub fn instance_z() -> InstanceFloat {uid!()} pub fn instance_base_x() -> InstanceFloat {uid!()} pub fn instance_base_y() -> InstanceFloat {uid!()} pub fn instance_font_size() -> InstanceFloat {uid!()} pub fn instance_marker() -> InstanceFloat {uid!()} pub fn instance_char_offset() -> InstanceFloat {uid!()} pub fn uniform_zbias() -> UniformFloat {uid!()} pub fn uniform_brightness() -> UniformFloat {uid!()} pub fn uniform_curve() -> UniformFloat {uid!()} pub fn uniform_view_do_scroll() -> 
UniformVec2 {uid!()} pub fn def_text_shader() -> ShaderGen { // lets add the draw shader lib let mut sg = ShaderGen::new(); sg.geometry_vertices = vec![0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0]; sg.geometry_indices = vec![0, 1, 2, 0, 3, 2]; sg.compose(shader_ast!({ let geom: vec2<Geometry>; let texturez: texture2d<Texture>; let font_tc: Self::instance_font_tc(); let color: Self::instance_color(); let x: Self::instance_x(); let y: Self::instance_y(); let w: Self::instance_w(); let h: Self::instance_h(); let z: Self::instance_z(); let base_x: Self::instance_base_x(); let base_y: Self::instance_base_y(); let font_size: Self::instance_font_size(); let char_offset: Self::instance_char_offset(); let marker: Self::instance_marker(); let tex_coord1: vec2<Varying>; let tex_coord2: vec2<Varying>; let tex_coord3: vec2<Varying>; let clipped: vec2<Varying>; let rect: vec4<Varying>; let zbias: Self::uniform_zbias(); let brightness: Self::uniform_brightness(); let curve: Self::uniform_curve(); let view_do_scroll: Self::uniform_view_do_scroll(); fn pixel() -> vec4 { let dx = dfdx(vec2(tex_coord1.x * 4096.0, 0.)).x; let dp = 1.0 / 4096.0; // basic hardcoded mipmapping so it stops 'swimming' in VR let s = 1.0; if dx > 5.0 { s = 0.7; } else if dx > 2.75 { // combine 3x3 s = ( sample2d(texturez, tex_coord3.xy + vec2(0., 0.)).z + sample2d(texturez, tex_coord3.xy + vec2(dp, 0.)).z + sample2d(texturez, tex_coord3.xy + vec2(0., dp)).z + sample2d(texturez, tex_coord3.xy + vec2(dp, dp)).z ) * 0.25; } else if dx > 1.75 { // combine 3x3 s = sample2d(texturez, tex_coord3.xy).z; } else if dx > 1.3 { // combine 2x2 s = sample2d(texturez, tex_coord2.xy).y; } else { s = sample2d(texturez, tex_coord1.xy).x; } s = pow(s, curve); return vec4(s * color.rgb * brightness * color.a, s * color.a); // + color("#a"); } fn vertex() -> vec4 { let shift: vec2 = -view_scroll * view_do_scroll; // + vec2(x, y); let min_pos = vec2(x, y); let max_pos = vec2(x + w, y - h); clipped = clamp( mix(min_pos, max_pos, geom) 
+ shift, view_clip.xy, view_clip.zw ); let normalized: vec2 = (clipped - min_pos - shift) / (max_pos - min_pos); rect = vec4(min_pos.x, min_pos.y, max_pos.x, max_pos.y) + shift.xyxy; tex_coord1 = mix( font_tc.xy, font_tc.zw, normalized.xy ); tex_coord2 = mix( font_tc.xy, font_tc.xy + (font_tc.zw - font_tc.xy) * 0.75, normalized.xy ); tex_coord3 = mix( font_tc.xy, font_tc.xy + (font_tc.zw - font_tc.xy) * 0.6, normalized.xy ); return camera_projection * (camera_view * (view_transform * vec4(clipped.x, clipped.y, z + zbias, 1.))); } })) } pub fn begin_text(&mut self, cx: &mut Cx) -> AlignedInstance { //let font_id = self.font.font_id.unwrap(); let inst = cx.new_instance(&self.shader, 0); let aligned = cx.align_instance(inst); let text_style = &self.text_style; let brightness = text_style.brightness; let curve = text_style.curve; if aligned.inst.need_uniforms_now(cx) { // cx.fonts[font_id].width as f32 , cx.fonts[font_id].height as f32 aligned.inst.push_uniform_texture_2d_id(cx, cx.fonts_atlas.texture_id); //tex_size //aligned.inst.push_uniform_vec2(cx, self.font.texture_size); aligned.inst.push_uniform_float(cx, 0.); aligned.inst.push_uniform_float(cx, brightness); aligned.inst.push_uniform_float(cx, curve); aligned.inst.push_uniform_vec2f( cx, if self.do_h_scroll {1.0}else {0.0}, if self.do_v_scroll {1.0}else {0.0} ); //aligned.inst.push_uniform_float(cx, if self.do_subpixel_aa{1.0}else{0.0}); //list_clip //area.push_uniform_vec4f(cx, -50000.0,-50000.0,50000.0,50000.0); } return aligned } pub fn add_text<F>(&mut self, cx: &mut Cx, geom_x: f32, geom_y: f32, char_offset: usize, aligned: &mut AlignedInstance, chunk: &[char], mut char_callback: F) where F: FnMut(char, usize, f32, f32) -> f32 { let text_style = &self.text_style; let mut geom_x = geom_x; let mut char_offset = char_offset; let font_id = text_style.font.font_id.unwrap(); let cxfont = &mut cx.fonts[font_id]; let dpi_factor = cx.current_dpi_factor; //let geom_y = (geom_y * dpi_factor).floor() / dpi_factor; let 
atlas_page_id = cxfont.get_atlas_page_id(dpi_factor, text_style.font_size); let font = &mut cxfont.font_loaded.as_ref().unwrap(); let font_size_logical = text_style.font_size * 96.0 / (72.0 * font.units_per_em); let font_size_pixels = font_size_logical * dpi_factor; let atlas_page = &mut cxfont.atlas_pages[atlas_page_id]; <|fim▁hole|> let instance = { let cxview = &mut cx.views[aligned.inst.view_id]; let draw_call = &mut cxview.draw_calls[aligned.inst.draw_call_id]; &mut draw_call.instance }; for wc in chunk { let unicode = *wc as usize; let glyph_id = font.char_code_to_glyph_index_map[unicode]; if glyph_id >= font.glyphs.len() { println!("GLYPHID OUT OF BOUNDS {} {} len is {}", unicode, glyph_id, font.glyphs.len()); continue; } let glyph = &font.glyphs[glyph_id]; let advance = glyph.horizontal_metrics.advance_width * font_size_logical * self.font_scale; // snap width/height to pixel granularity let w = ((glyph.bounds.p_max.x - glyph.bounds.p_min.x) * font_size_pixels).ceil() + 1.0; let h = ((glyph.bounds.p_max.y - glyph.bounds.p_min.y) * font_size_pixels).ceil() + 1.0; // this one needs pixel snapping let min_pos_x = geom_x + font_size_logical * glyph.bounds.p_min.x; let min_pos_y = geom_y - font_size_logical * glyph.bounds.p_min.y + text_style.font_size * text_style.top_drop; // compute subpixel shift let subpixel_x_fract = min_pos_x - (min_pos_x * dpi_factor).floor() / dpi_factor; let subpixel_y_fract = min_pos_y - (min_pos_y * dpi_factor).floor() / dpi_factor; // scale and snap it let scaled_min_pos_x = geom_x + font_size_logical * self.font_scale * glyph.bounds.p_min.x - subpixel_x_fract; let scaled_min_pos_y = geom_y - font_size_logical * self.font_scale * glyph.bounds.p_min.y + text_style.font_size * self.font_scale * text_style.top_drop - subpixel_y_fract; // only use a subpixel id for really small fonts let subpixel_id = if text_style.font_size>12.0 { 0 } else { // subtle 64 index subpixel id ((subpixel_y_fract * 7.0) as usize) << 3 | (subpixel_x_fract * 
7.0) as usize }; let tc = if let Some(tc) = &atlas_page.atlas_glyphs[glyph_id][subpixel_id] { tc } else { // see if we can fit it // allocate slot cx.fonts_atlas.atlas_todo.push(CxFontsAtlasTodo { subpixel_x_fract, subpixel_y_fract, font_id, atlas_page_id, glyph_id, subpixel_id }); atlas_page.atlas_glyphs[glyph_id][subpixel_id] = Some( cx.fonts_atlas.alloc_atlas_glyph(&cxfont.path, w, h) ); atlas_page.atlas_glyphs[glyph_id][subpixel_id].as_ref().unwrap() }; // lets allocate let marker = char_callback(*wc, char_offset, geom_x, advance); let data = [ tc.tx1, tc.ty1, tc.tx2, tc.ty2, self.color.r, // color self.color.g, self.color.b, self.color.a, scaled_min_pos_x, scaled_min_pos_y, w * self.font_scale / dpi_factor, h * self.font_scale / dpi_factor, self.z + 0.00001 * min_pos_x, //slight z-bias so we don't get z-fighting with neighbouring chars overlap a bit geom_x, geom_y, text_style.font_size, char_offset as f32, // char_offset marker, // marker ]; instance.extend_from_slice(&data); // !TODO make sure a derived shader adds 'empty' values here. 
geom_x += advance; char_offset += 1; aligned.inst.instance_count += 1; } } pub fn end_text(&mut self, cx: &mut Cx, aligned: &AlignedInstance) -> Area { cx.update_aligned_instance_count(aligned); aligned.inst.into() } pub fn draw_text(&mut self, cx: &mut Cx, text: &str) -> Area { let mut aligned = self.begin_text(cx); let mut chunk = Vec::new(); let mut width = 0.0; let mut elipct = 0; let text_style = &self.text_style; let font_size = text_style.font_size; let line_spacing = text_style.line_spacing; let height_factor = text_style.height_factor; let mut iter = text.chars().peekable(); let font_id = text_style.font.font_id.unwrap(); let font_size_logical = text_style.font_size * 96.0 / (72.0 * cx.fonts[font_id].font_loaded.as_ref().unwrap().units_per_em); while let Some(c) = iter.next() { let last = iter.peek().is_none(); let mut emit = last; let mut newline = false; let slot = if c < '\u{10000}' { cx.fonts[font_id].font_loaded.as_ref().unwrap().char_code_to_glyph_index_map[c as usize] } else { 0 }; if c == '\n' { emit = true; newline = true; } if slot != 0 { let glyph = &cx.fonts[font_id].font_loaded.as_ref().unwrap().glyphs[slot]; width += glyph.horizontal_metrics.advance_width * font_size_logical * self.font_scale; match self.wrapping { Wrapping::Char => { chunk.push(c); emit = true }, Wrapping::Word => { chunk.push(c); if c == ' ' || c == '\t' || c == ',' || c == '\n'{ emit = true; } }, Wrapping::Line => { chunk.push(c); if c == 10 as char || c == 13 as char { emit = true; } newline = true; }, Wrapping::None => { chunk.push(c); }, Wrapping::Ellipsis(ellipsis_width) => { if width>ellipsis_width { // output ... 
if elipct < 3 { chunk.push('.'); elipct += 1; } } else { chunk.push(c) } } } } if emit { let height = font_size * height_factor * self.font_scale; let geom = cx.walk_turtle(Walk { width: Width::Fix(width), height: Height::Fix(height), margin: Margin::zero() }); self.add_text(cx, geom.x, geom.y, 0, &mut aligned, &chunk, | _, _, _, _ | {0.0}); width = 0.0; chunk.truncate(0); if newline { cx.turtle_new_line_min_height(font_size * line_spacing * self.font_scale); } } } self.end_text(cx, &aligned) } // looks up text with the behavior of a text selection mouse cursor pub fn find_closest_offset(&self, cx: &Cx, area: &Area, pos: Vec2) -> usize { let scroll_pos = area.get_scroll_pos(cx); let spos = Vec2 {x: pos.x + scroll_pos.x, y: pos.y + scroll_pos.y}; let x_o = area.get_instance_offset(cx, Self::instance_base_x().instance_type()).unwrap(); let y_o = area.get_instance_offset(cx, Self::instance_base_y().instance_type()).unwrap(); let w_o = area.get_instance_offset(cx, Self::instance_w().instance_type()).unwrap(); let font_size_o = area.get_instance_offset(cx, Self::instance_font_size().instance_type()).unwrap(); let char_offset_o = area.get_instance_offset(cx, Self::instance_char_offset().instance_type()).unwrap(); let read = area.get_read_ref(cx); let text_style = &self.text_style; let line_spacing = text_style.line_spacing; let mut index = 0; if let Some(read) = read { while index < read.count { let y = read.buffer[read.offset + y_o + index * read.slots]; let font_size = read.buffer[read.offset + font_size_o + index * read.slots]; if y + font_size * line_spacing > spos.y { // alright lets find our next x while index < read.count { let x = read.buffer[read.offset + x_o + index * read.slots]; let y = read.buffer[read.offset + y_o + index * read.slots]; //let font_size = read.buffer[read.offset + font_size_o + index* read.slots]; let w = read.buffer[read.offset + w_o + index * read.slots]; if x > spos.x + w * 0.5 || y > spos.y { let prev_index = if index == 0 {0}else {index 
- 1}; let prev_x = read.buffer[read.offset + x_o + prev_index * read.slots]; let prev_w = read.buffer[read.offset + w_o + index * read.slots]; if index < read.count - 1 && prev_x > spos.x + prev_w { // fix newline jump-back return read.buffer[read.offset + char_offset_o + index * read.slots] as usize; } return read.buffer[read.offset + char_offset_o + prev_index * read.slots] as usize; } index += 1; } } index += 1; } if read.count == 0 { return 0 } return read.buffer[read.offset + char_offset_o + (read.count - 1) * read.slots] as usize; } return 0 } pub fn get_monospace_base(&self, cx: &Cx) -> Vec2 { let font_id = self.text_style.font.font_id.unwrap(); let font = cx.fonts[font_id].font_loaded.as_ref().unwrap(); let slot = font.char_code_to_glyph_index_map[33]; let glyph = &font.glyphs[slot]; //let font_size = if let Some(font_size) = font_size{font_size}else{self.font_size}; Vec2 { x: glyph.horizontal_metrics.advance_width * (96.0 / (72.0 * font.units_per_em)), y: self.text_style.line_spacing } } }<|fim▁end|>
<|file_name|>notes.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- import urllib2 import time, os import sys, fileinput from bs4 import BeautifulSoup class Grabber(object): def use(self): print "" print "* This just Fucking whatever for grabbing." print "* For license just fucking to change this. ^Summon Agus Created." print "-------------------------------------------------------------------------------------" print "[1] Add Note : ./notes.py addnote <file_name> <title> <content> <tag1, tag2>" print "[2] List Note : ./notes.py listnote <file_name>" print "[3] Delete Note : ./notes.py delnote <file_name> <numb_line>" print "[4] Add Url to Grab : ./notes.py addurl <file_name> <url>" print "-------------------------------------------------------------------------------------" print "" def addnote(self, args): self.help = "./notes.py addnote <file_name> <title> <content> <tag1, tag2>" if len(sys.argv) < 5: sys.exit("[-] Fucking Damn!!\n[?] Use similiar this: " + self.help) f_note_out = sys.argv[2] title = sys.argv[3] content = sys.argv[4] tags = sys.argv[5] print "[+] Your args is: ./notes.py", args, f_note_out, title, content, tags time.sleep(1) print "[>] Waiting for save your note ..." my_note = '"'+title+'": "'+content+'"'+ ' tag: '+ tags """ [?] Trying if file was exists, so note will add in new line. [?] But, if file is doesn't exists, this program will automatically write file with your first argument. """ try: f_note = open(f_note_out, 'a') my_note = my_note + '\n' except IOError: f_note = open(f_note_out, 'w') my_note = '\n' + my_note f_note.write(my_note) f_note.close() time.sleep(1) print "[>] Your note was saved in <"+ f_note_out +">" def listnote(self, args): self.help = "./notes.py listnote <file_name>" if len(sys.argv) < 2: sys.exit("[-] Fucking Damn!!\n[?] 
Use similiar this: " + self.help) print "[+] Your args is: ./notes.py", args, sys.argv[2] try: with open(sys.argv[2], "r") as f: print " -------------------------------------- " for line in f: print line.replace("\n", "") time.sleep(0.3) print " -------------------------------------- " except IOError: sys.exit("[-] File Doesn't exists!!"+\ "\n[?] This your path now: " +str(os.getcwd())+\ "\n[?] This files and folders in your path now: " + str(os.listdir('.')) ) def delnote(self, args): self.help = "./notes.py delnote <file_name> <numb_line>" if len(sys.argv) < 3: sys.exit("[-] Fucking Damn!!\n[?] Use similiar this: " + self.help) f_note_out = str(sys.argv[2]) try: for numb, line in enumerate(fileinput.input(f_note_out, inplace=True)): #start index from 0 if numb == int(sys.argv[3]): continue else: sys.stdout.write(line) sys.exit("[+] Success delete line <"+sys.argv[3]+"> in file of <"+ f_note_out +">") except OSError: sys.exit("[-] File Doesn't exists!!"+\ "\n[?] This your path now: " +str(os.getcwd())+\ "\n[?] This files and folders in your path now: " + str(os.listdir('.')) ) def addurl(self, args): self.help = "./notes.py addurl <file_name> <url>" if len(sys.argv) < 3: sys.exit("[-] Fucking Damn!!\n[?] Use similiar this: " + self.help) url = str(sys.argv[3]) f_note_out = str(sys.argv[2]) print "[+] Your args is: ./notes.py", args, f_note_out, url agent = {'User-Agent':'Mozilla/5.0'} request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() soup = BeautifulSoup(page) title = soup.title.string.encode('utf-8') descriptions = soup.findAll(attrs={"name":"description"})[0]['content'].encode('utf-8') keywords = soup.findAll(attrs={"name":"keywords"})[0]['content'].encode('utf-8') print "[>] Waiting for save your note ..." 
time.sleep(1) my_note = '"'+title+'": "'+descriptions+'"'+ ' tag: '+ keywords try: f_note = open(f_note_out, 'a') my_note = my_note + '\n' except IOError: f_note = open(f_note_out, 'w') my_note = '\n' + my_note f_note.write(my_note) f_note.close() time.sleep(1) print "[>] Your url note was saved in <"+ f_note_out +">" if __name__ == "__main__": mome = Grabber() try: args = str(sys.argv[1])<|fim▁hole|> elif args == 'delnote': mome.delnote(args) elif args == 'addurl': mome.addurl(args) else: print "Funcking damn!, please checkout your input" except IndexError: mome.use()<|fim▁end|>
if args == 'addnote': mome.addnote(args) elif args == 'listnote': mome.listnote(args)
<|file_name|>bits.rs<|end_file_name|><|fim▁begin|>//! View bits in memory with turtles //! //! This example uses [@myrrlyn]'s [`bitvec`] crate to turn data into strings of //! bits, and then draws them on the screen. //! //! You are encouraged to change both the data used to seed the turtle, and the //! `bitvec` calls that control how the turtle acts, to see what changes. //! //! [@myrrlyn]: //github.com/myrrlyn //! [`bitvec`]: //crates.io/crates/bitvec // This imports the things we need from `bitvec`, including the `Bits` trait for // the `.view_bits::<_>()` method we use to view memory. use bitvec::prelude::*; use turtle::Turtle; <|fim▁hole|>/// This text will be inspected as individual bytes, and drawn on the screen. /// You can change it to see what different text looks like when viewed as bits. /// /// The example program will print more information about the parts of the text /// to the console while the turtle draws, so that you can see how each glyph /// corresponds to parts of the rendered memory. static TEXT: &str = "¡Hola, mundo! 🌍🌏🌎"; /// This number will have its bit pattern printed on screen. Rust provides some /// interesting numbers in its standard library; you can replace this with other /// numbers to see what they look like. Pi is provided as the default solely /// because it is well-known, and has an interesting pattern. const NUMBER: f32 = std::f32::consts::PI; /// This controls the width of the drawn line for each bit. const BIT_WIDTH: f64 = 20.0; /// This controls the vertical spacing between rows of bit lines. const BIT_HEIGHT: f64 = 10.0; /// Set the horizontal spacing between successive bits in a row const BIT_MARGIN: f64 = BIT_WIDTH / 2.0; /// Compute the total width of a bit plus its spacing const BIT_BOX: f64 = BIT_WIDTH + BIT_MARGIN; fn main() { // This block sets up the turtle to draw bits more or less centered in the // screen. 
The turtle works by walking horizontally for each bit in a byte, // then backtracking and walking vertically to the next byte. let mut turtle = Turtle::new(); // The turtle starts in the center of the screen, but we want to move it // around before drawing. turtle.pen_up(); // Compute the boundaries of the part of the screen where the turtle will // draw. We expect to be drawing eight bits, with half to the right of // center and half to the left. let right_edge = BIT_BOX * 8.0 / 2.0; // We also expect to be drawing a row for each byte in the text, with an // additional separator row for each *character*, half above and half below // the center of the screen. This computes how many rows of text we will // draw, then moves the turtle appropriately. let byte_rows = TEXT.len(); let char_gaps = TEXT.chars().count(); let top_edge = BIT_HEIGHT * ((byte_rows + char_gaps) as f64 / 2.0); // The turtle starts from the top right of the region, turtle.forward(top_edge); turtle.right(90.0); turtle.forward(right_edge); // and walks left turtle.left(180.0); draw_text(&mut turtle, TEXT); // The `draw_number` function reads bits from left to right, so the turtle // should also walk from left to right. The `draw_number` function expects // that it will be drawing rows sixteen bits long, so it needs to move // forward another four bits' worth of space in order to be in the correct // spot. turtle.forward(8.0 * BIT_BOX / 2.0); turtle.forward(16.0 * BIT_BOX / 2.0); // Then, it needs to turn around, to walk in the other direction. turtle.right(180.0); draw_number(&mut turtle, NUMBER); } /// Draws the bits of a text span on the screen. fn draw_text(turtle: &mut Turtle, text: &str) { // Rust strings can iterate over their individual characters. This block // loops over characters, collecting their start point in the text so that // we can grab the encoded bytes of each one. 
let mut row_num = 0; for (char_num, (start, codepoint)) in text.char_indices().enumerate() { println!("Character {}: {}", char_num, codepoint); // Each character has a variable width, so we need to find that. let byte_count = codepoint.len_utf8(); // And then collect the bytes of the string that make up the character. // `start` gives us the starting position in the text sequence, and // `byte_count` gives us the length in bytes of the character, so we // need to select the range beginning at `start`, running for // `byte_count`. Another style of writing this that you might see in // Rust libraries is `[start ..][.. length]`. let row: &[u8] = &text.as_bytes()[start .. start + byte_count]; // For each byte (`u8`), we use `bitvec` to make a view into its bits. // `bitvec` provides the `.view_bits::<_>()` method on Rust integers for // easy access to its view types. // // The `Lsb0` means that the view moves from least significant bit to // most significant. Since we want to display on screen the most // significant bit on the left, and the least on the right, the turtle // will have to move from right to left to match. // // The `Lsb0` and `Msb0` types describe different ways to view the same // data. You can read more about them in the `bitvec` docs, and at // Wikipedia: // https://docs.rs/bitvec/0.16.1/bitvec/cursor/index.html // https://en.wikipedia.org/wiki/Endianness#Bit_endianness for byte in row { println!(" Byte {:02}:\n Value: 0x{:02X}\n Bits: {:08b}", row_num, byte, byte); let bits: &BitSlice<_, _> = byte.view_bits::<Lsb0>(); // Then we draw the byte's bits as a row draw_row(turtle, bits); // And go to the next row next_row(turtle, 90.0); row_num += 1; } // This puts a dividing line between each *character* in the text. // Some characters may have more than one byte, and those bytes will be // grouped together. delimit(turtle, 8.0 * BIT_BOX - BIT_MARGIN); } } /// Draws the bits of a number on screen. 
fn draw_number(turtle: &mut Turtle, number: f32) { // `bitvec` can look at more than just `u8`. Let's try looking at the bits // that represent a number! // // Some numbers, like `f32`, have special rules for their representation in // bits. `bitvec` only knows about raw bits, so it does not provide direct // support for `f32`. Rust lets us get the bit representation from an `f32` // with the method `to_bits(f32) -> u32`, which forgets about the `f32` // rules and uses the number's storage as ordinary bits. // // You can read more about the rules for `f32`'s storage in memory, and // behavior in programs, here: // https://en.wikipedia.org/wiki/Double-precision_floating-point_format let raw_number: u32 = number.to_bits(); // `bitvec` can also view bits from left to right, with `Msb0`. let bits: &BitSlice<_, _> = raw_number.view_bits::<Msb0>(); // The `&BitSlice` type acts just like `&[bool]`, so it comes with a // `.chunks` method which divides it into smaller pieces. `bitvec` can take // any number, not just multiples of 8, but 16 is a convenient number to // look at. Try changing it to a different number, like 10, to see what // happens! for (num, row) in bits.chunks(16).enumerate() { println!("Row {} bits: {:b}", num, row); // Each chunk produced is a smaller `&BitSlice`, just like // `&[bool].chunks` produces smaller `&[bool]`s, so we can draw it. draw_row(turtle, row); next_row(turtle, -90.0); } // Reader exercise: // // The IEEE-754 format for `f32` numbers separates them into three parts: // // 1. The sign marks whether the number is positive or negative: 1 bit // 2. The exponent marks how far from zero the number is: 8 bits // 3. The fraction describes the number: 23 bits. // // Using these widths (1 bit, 8 bits, 23 bits), the knowledge that // `&BitSlice` is a normal Rust slice, and the API documentation for // `std::iter::Iterator`, see if you can display each portion of an `f32` // as its own row. 
// // Hints: // // - The variable `bits` is set up to view the entire number, from most // significant bit to least. // - You can get access to a structure that performs iteration by calling // `bits.iter()`. // - You can use the `Iterator::by_ref` method to prevent `Iterator` adapter // functions from destroying the source iterator. // - `&BitSlice` is an ordinary Rust slice, so you can use `[start .. end]` // range indexing to get smaller pieces of it. } /// Draw a row of bits on the screen. /// /// This takes a reference to a turtle, which draws, and a reference to a slice /// of bits, which provides the data to draw. /// /// Note that this works whether we're going through the bits left to right /// (`Msb0`) or right to left (`Lsb0`), because we assume that the turtle is /// going to start on the correct side and be facing the correct way for this /// drawing to work. fn draw_row<O, T>(turtle: &mut Turtle, row: &BitSlice<O, T>) where O: BitOrder, T: BitStore { // `&BitSlice` can iterate over bits. It is just like `&[bool]`, and so it // produces `&bool` for each loop. for bit in row.iter().by_val() { // This checks if the bit produced by the row is `1` or `0`, and sets // the pen color to black (`1`) or light grey (`0`) if bit { turtle.set_pen_color("black"); } else { turtle.set_pen_color("light grey"); } // For each bit, the loop puts down the pen to draw a line of the bit's // color, then picks up the pen to add some horizontal spacing between // them. turtle.pen_down(); turtle.forward(BIT_WIDTH); turtle.pen_up(); turtle.forward(BIT_MARGIN); } // Rewind the turtle for _ in 0 .. row.len() { turtle.backward(BIT_BOX); } } /// Produces a separator line to demark different sections of memory. 
fn delimit(turtle: &mut Turtle, width: f64) { turtle.set_pen_color("grey"); turtle.pen_down(); turtle.forward(width); turtle.backward(width); next_row(turtle, 90.0); } /// Moves the turtle down a row fn next_row(turtle: &mut Turtle, angle: f64) { turtle.pen_up(); turtle.left(angle); turtle.forward(BIT_HEIGHT); turtle.right(angle); }<|fim▁end|>
// Modify these constants to change the behavior of the example.
<|file_name|>heap_test.go<|end_file_name|><|fim▁begin|>package pqueue import ( "testing" "github.com/stretchr/testify/assert" ) func Test_AdjustHeap(t *testing.T) { list := []Node{Node{0, 0}, Node{1, 1}, Node{2, 2}, Node{3, 3}, Node{4, 1}, Node{6, 6}} adjustHeap(list, 1, len(list)-1)<|fim▁hole|><|fim▁end|>
assert.Equal(t, 6, list[1].value) }
<|file_name|>mainwindow.cpp<|end_file_name|><|fim▁begin|>/** Copyright (C) 2008-2013 Stefan Kolb. This file is part of the program pso (particle swarm optimization). The program pso is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version. The program pso is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with pso. If not, see <http://www.gnu.org/licenses/>. */ #include "mainwindow.h" #include "swarmcontrolwidget.h" #include "functionoptionswidget.h" #include "variationcontrolwidget.h" #include "particleviewwidget.h" #include "graphwidget.h" MainWindow::MainWindow( QWidget *parent, Qt::WindowFlags flags ) : QMainWindow( parent, flags ) { variation_max_iterations = 10000; swarm.setDimension( 2 ); setWindowTitle( "Particle Swarm Optimization" ); setDockOptions( QMainWindow::ForceTabbedDocks | QMainWindow::AllowTabbedDocks | QMainWindow::AnimatedDocks | QMainWindow::AllowTabbedDocks ); { QWidget *widget = new QWidget( this ); QBoxLayout *layout = new QBoxLayout( QBoxLayout::TopToBottom, widget ); ui_functionviewer = new FunctionViewer( this ); ui_functionviewer->setSwarm( &swarm ); connect( ui_functionviewer, SIGNAL( particleNumberChanged() ), this, SLOT( particleNumberChanged() ) ); layout->addWidget( ui_functionviewer ); variation_graph = new QwtPlot( QwtText( "Variation" ), this ); variation_graph->setAxisTitle( QwtPlot::xBottom, "particles" ); variation_graph->setAxisTitle( QwtPlot::yLeft, "iterations" ); variation_graph->enableAxis( QwtPlot::yRight ); variation_graph->setAxisTitle( QwtPlot::yRight, "fitness" ); variation_graph->insertLegend( new QwtLegend() ); 
variation_graph->setHidden( true ); layout->addWidget( variation_graph ); variation_variable_curve = new QwtPlotCurve( "iterations" ); variation_variable_curve->setPen( QPen( QColor( 255, 0, 0 ) ) ); variation_variable_curve->attach( variation_graph ); variation_fitness_curve = new QwtPlotCurve( "fitness" ); variation_fitness_curve->setPen( QPen( QColor( 0, 0, 255 ) ) ); variation_fitness_curve->attach( variation_graph ); variation_fitness_curve->setYAxis( QwtPlot::yRight ); widget->setLayout( layout ); setCentralWidget( widget ); } timer = new QTimer( this ); connect( timer, SIGNAL( timeout() ), this, SLOT( timerTimeOut() ) ); QMenu *mode = menuBar()->addMenu( tr( "&Mode" ) ); QActionGroup *actiongroup = new QActionGroup( this ); connect( actiongroup, SIGNAL( triggered( QAction * ) ), this, SLOT( changeApplicationMode( QAction * ) ) ); menu_actions_container["mode:3d"] = actiongroup->addAction( "&3D" ); menu_actions_container["mode:3d"]->setCheckable( true ); menu_actions_container["mode:3d"]->setChecked( true ); mode->addAction( menu_actions_container["mode:3d"] ); menu_actions_container["mode:variation"] = actiongroup->addAction( "&Variation" ); menu_actions_container["mode:variation"]->setCheckable( true ); mode->addAction( menu_actions_container["mode:variation"] ); mode->addSeparator(); menu_actions_container["mode:exit"] = mode->addAction( "&Exit", this, SLOT( close() ) ); QMenu *options = menuBar()->addMenu( tr( "&Options" ) ); menu_actions_container["options:default view"] = options->addAction( "&Default View", ui_functionviewer, SLOT( setDefaultView() ) ); menu_actions_container["options:min function color"] = options->addAction( "&Min Function Color", ui_functionviewer, SLOT( changeFunctionMinColor() ) ); menu_actions_container["options:max function color"] = options->addAction( "&Max Function Color", ui_functionviewer, SLOT( changeFunctionMaxColor() ) ); menu_actions_container["options:point size"] = options->addAction( "&Point Size", ui_functionviewer, 
SLOT( changePointSize() ) ); menu_actions_container["options:point color"] = options->addAction( "&Point Color", ui_functionviewer, SLOT( changePointColor() ) ); menu_actions_container["options:mini map"] = options->addAction( "&Mini Map" ); menu_actions_container["options:mini map"]->setCheckable( true ); menu_actions_container["options:mini map"]->setChecked( true ); connect( menu_actions_container["options:mini map"], SIGNAL( toggled( bool ) ), ui_functionviewer, SLOT( showMiniMap( bool ) ) ); menu_actions_container["options:mini map mark best"] = options->addAction( "&Mini Map Mark Best" ); menu_actions_container["options:mini map mark best"]->setCheckable( true ); menu_actions_container["options:mini map mark best"]->setChecked( true ); ui_functionviewer->showMiniMapGlobalBest( true ); connect( menu_actions_container["options:mini map mark best"], SIGNAL( toggled( bool ) ), ui_functionviewer, SLOT( showMiniMapGlobalBest( bool ) ) ); menu_actions_container["options:mark best"] = options->addAction( "&Mark Best" ); menu_actions_container["options:mark best"]->setCheckable( true ); menu_actions_container["options:mark best"]->setChecked( true ); ui_functionviewer->showGlobalBest3D( true ); connect( menu_actions_container["options:mark best"], SIGNAL( toggled( bool ) ), ui_functionviewer, SLOT( showGlobalBest3D( bool ) ) ); #ifdef USE_FTGL menu_actions_container["options:show axis"] = options->addAction( "&Show Axis" ); menu_actions_container["options:show axis"]->setCheckable( true ); menu_actions_container["options:show axis"]->setChecked( true ); connect( menu_actions_container["options:show axis"], SIGNAL( toggled( bool ) ), ui_functionviewer, SLOT( showAxis( bool ) ) ); if( !ui_functionviewer->isFTGLFontLoaded() ) { menu_actions_container["options:show axis"]->setEnabled( false ); menu_actions_container["options:show axis"]->setChecked( false ); } #endif menu_actions_container["options:trace particles"] = options->addAction( "&Trace Particles" ); 
menu_actions_container["options:trace particles"]->setCheckable( true ); menu_actions_container["options:trace particles"]->setChecked( false ); ui_functionviewer->showTraceParticles( false ); connect( menu_actions_container["options:trace particles"], SIGNAL( toggled( bool ) ), ui_functionviewer, SLOT( showTraceParticles( bool ) ) ); menu_actions_container["options:gl background color"] = options->addAction( "&GL Background Color", ui_functionviewer, SLOT( changeBackgroundColor() ) ); menu_actions_container["options:wireframe"] = options->addAction( "&Wireframe" ); menu_actions_container["options:wireframe"]->setCheckable( true ); connect( menu_actions_container["options:wireframe"], SIGNAL( toggled( bool ) ), this, SLOT( changeGLWireframe( bool ) ) ); menu_actions_container["options:points"] = options->addAction( "&Points" ); menu_actions_container["options:points"]->setCheckable( true ); connect( menu_actions_container["options:points"], SIGNAL( toggled( bool ) ), this, SLOT( changeGLPoint( bool ) ) ); options->addSeparator(); menu_actions_container["options:variation max iterations"] = options->addAction( "&Variation Max Iterations", this, SLOT( changeVariationMaxIterations() ) ); menu_actions_container["options:variation max iterations"]->setEnabled( false ); QMenu *dock_menu = new QMenu( tr( "Dock" ) ); ui_dockmanager = new DockManager( this, dock_menu ); menuBar()->addMenu( dock_menu ); setDockOptions( QMainWindow::VerticalTabs ); ui_dockwidgets["swarm control"] = new DockWidget( tr( "Swarm Control" ) ); ui_dockmanager->addDock( Qt::LeftDockWidgetArea, ui_dockwidgets["swarm control"] ); ui_swarm_control = new SwarmControlWidget( this, this ); ui_dockwidgets["swarm control"]->setWidget( ui_swarm_control ); ui_dockwidgets["function options"] = new DockWidget( tr( "Function Options" ) ); ui_dockmanager->addDock( Qt::LeftDockWidgetArea, ui_dockwidgets["function options"] ); ui_function_options = new FunctionOptionsWidget( this, this ); ui_dockwidgets["function 
options"]->setWidget( ui_function_options ); ui_dockwidgets["variation control"] = new DockWidget( tr( "Variation Coltrol" ) ); ui_dockmanager->addDock( Qt::LeftDockWidgetArea, ui_dockwidgets["variation control"] ); ui_variation_control = new VariationControlWidget( this, this ); ui_dockwidgets["variation control"]->setWidget( ui_variation_control ); ui_dockwidgets["particle view"] = new DockWidget( tr( "Particle View" ) ); ui_dockmanager->addDock( Qt::RightDockWidgetArea, ui_dockwidgets["particle view"] ); ui_particle_view = new ParticleViewWidget( this, this ); ui_dockwidgets["particle view"]->setWidget( ui_particle_view ); ui_dockwidgets["particle view"]->close(); ui_dockwidgets["graph"] = new DockWidget( tr( "Graph" ) ); ui_dockmanager->addDock( Qt::RightDockWidgetArea, ui_dockwidgets["graph"] ); ui_graph_widget = new GraphWidget( this, this ); ui_dockwidgets["graph"]->setWidget( ui_graph_widget ); ui_dockwidgets["graph"]->close(); setApplicationMode( PSOMode3DView ); ui_function_options->setFunction(); } void MainWindow::changeGLWireframe( bool w ) { if( menu_actions_container["options:points"]->isChecked() && w ) { menu_actions_container["options:points"]->setChecked( false ); } ui_functionviewer->setViewModeWireframe( w ); } void MainWindow::changeGLPoint( bool w ) { if( menu_actions_container["options:wireframe"]->isChecked() && w ) { menu_actions_container["options:wireframe"]->setChecked( false ); } ui_functionviewer->setViewModePoint( w ); } /** Handles the timer timeout callback! In both application modes the optimization is performed in this function. This includes all user interface element updates. 
*/ void MainWindow::timerTimeOut() { switch( application_mode ) { case PSOMode3DView: try { bool check = false; if( swarm.m_swarm.empty() ) { ui_swarm_control->disableTimer(); } if( swarm.getCheckAbortCriterion() ) { check = !swarm.checkAbortCriterion(); } else { swarm.checkAbortCriterion(); //needed for auto velocity see: void Swarm::calculateMaxVelocity() check = true; } if( check ) { if( !ui_dockwidgets["particle view"]->isHidden() ) { ui_particle_view->updateView(); } if( !ui_dockwidgets["graph"]->isHidden() ) { ui_graph_widget->updateGraph(); } computeNextStep(); ui_swarm_control->showUsedIterations( swarm.getIterationStep() ); if( swarm.getBestParticle() ) { ui_swarm_control->showBestPartileFitness( swarm.getBestParticle()->getBestValue() ); ui_swarm_control->showCurrentBestFoundPosition( swarm.getBestParticle()->getBestPosition()[0], swarm.getBestParticle()->getBestPosition()[1] ); } if( ui_swarm_control->isAutoVelocityUsed() ) { ui_swarm_control->showCurrentMaxVelocity( swarm.getMaxVelocity() ); } ui_functionviewer->updateGL(); } else { ui_swarm_control->disableTimer(); } } catch( RuntimeError &err ) { ui_swarm_control->disableTimer(); showError( err ); } break; case PSOModeVariation: if( ui_variation_control->getToValue() <= ui_variation_control->getFromValue() ) { ui_variation_control->disableTimer(); } else { try { size_t average_number = ui_variation_control->getAverageNumber(); QString variable = ui_variation_control->getCurrentlyUsedVariable(); double average = 0.0; double average_fitness = 0.0; bool random = true; if( ui_swarm_control->isCreationModeRandom() ) { random = true; } else { random = false; } for( size_t i = 0; i < average_number; i++ ) { ui_swarm_control->setMaxVelocity(); unsigned int particle_number = ui_swarm_control->getParticleNumber(); if( variable == ui_variation_control->getVariationVariableNames()["particle"] ) { particle_number = ui_variation_control->getFromValue(); } { VectorN<double> range_min( 1 ), range_max( 1 ); 
ui_function_options->getFunctionRange( range_min, range_max ); getSwarm()->createSwarm( particle_number, range_min, range_max, random ); } if( variable == ui_variation_control->getVariationVariableNames()["c1"] ) { getSwarm()->setParameterC1( ui_variation_control->getFromValue() ); } else if( variable == ui_variation_control->getVariationVariableNames()["c2"] ) { getSwarm()->setParameterC2( ui_variation_control->getFromValue() ); } else if( variable == ui_variation_control->getVariationVariableNames()["c3"] ) { getSwarm()->setParameterC3( ui_variation_control->getFromValue() ); } else if( variable == ui_variation_control->getVariationVariableNames()["w"] ) {<|fim▁hole|> else if( variable == ui_variation_control->getVariationVariableNames()["radius"] ) { getSwarm()->setNeighbourRadius( ui_variation_control->getFromValue() ); } else if( variable == ui_variation_control->getVariationVariableNames()["max velocity"] ) { getSwarm()->setMaxVelocity( ui_variation_control->getFromValue() ); } average += getSwarm()->optimize( variation_max_iterations ); average_fitness += getSwarm()->getBestFitness(); } variation_variables_data.push_back( ui_variation_control->getFromValue() ); variation_iterations_data.push_back( average / static_cast<double>( average_number ) ); variation_fitness_data.push_back( average_fitness / static_cast<double>( average_number ) ); variation_variable_curve->setSamples( &*variation_variables_data.begin(), &*variation_iterations_data.begin(), variation_variables_data.size() ); variation_fitness_curve->setSamples( &*variation_variables_data.begin(), &*variation_fitness_data.begin(), variation_variables_data.size() ); variation_graph->replot(); ui_variation_control->setFromValue( ui_variation_control->getFromValue() + ui_variation_control->getStepValue() ); } catch( RuntimeError &err ) { ui_variation_control->disableTimer(); showError( err ); } } break; } } void MainWindow::showError( const RuntimeError &err ) { QString line; line.setNum( err.getLine() ); 
QMessageBox::warning( this, QString( "Error" ), QString::fromStdString( err.getFile() ) + QString( ":" ) + line + QString( "\n" ) + QString::fromStdString( err.getMessage() ) ); } void MainWindow::particleNumberChanged() { ui_swarm_control->setParticleNumber( swarm.m_swarm.size() ); } Swarm< Function > *MainWindow::getSwarm() { return &swarm; } FunctionViewer *MainWindow::getGLWidget() { return ui_functionviewer; } FunctionOptionsWidget *MainWindow::getFunctionOptionsWidget() { return ui_function_options; } SwarmControlWidget *MainWindow::getSwarmControlWidget() { return ui_swarm_control; } QwtPlot *MainWindow::getVariationPlotWidget() { return variation_graph; } VariationControlWidget *MainWindow::getStatisticControlWidget() { return ui_variation_control; } QTimer *MainWindow::getTimer() { return timer; } ParticleViewWidget *MainWindow::getParticleViewWidget() { return ui_particle_view; } GraphWidget *MainWindow::getGraphWidget() { return ui_graph_widget; } /** Sets the maximum number of allowed iterations in the application mode variations. */ void MainWindow::changeVariationMaxIterations() { size_t iterations; bool ok; iterations = QInputDialog::getInteger( this, "Max Iterations", "max iterations:", variation_max_iterations, 1, 10000000, 1, &ok ); if( ok ) { variation_max_iterations = iterations; } } void MainWindow::changeApplicationMode( QAction *action ) { if( action->text() == "&3D" ) { setApplicationMode( PSOMode3DView ); } else if( action->text() == "&Variation" ) { setApplicationMode( PSOModeVariation ); } else { setApplicationMode( PSOMode3DView ); } } /** This function enables and disables a couple of user interface elements dependent on \a mode. 
\param[in] mode */ void MainWindow::setApplicationMode( PSOMode mode ) { application_mode = mode; ui_swarm_control->changeApplicationMode( mode ); ui_function_options->changeApplicationMode( mode ); switch( mode ) { case PSOMode3DView: getTimer()->setInterval( ui_swarm_control->getCurrentTimerTimeout() ); getSwarm()->clear(); getSwarm()->setDimension( 2 ); variation_graph->setHidden( true ); ui_functionviewer->setVisible( true ); ui_dockwidgets["particle view"]->setEnabled( true ); ui_dockwidgets["particle view"]->close(); ui_dockwidgets["graph"]->setEnabled( true ); ui_dockwidgets["graph"]->close(); ui_dockwidgets["variation control"]->setEnabled( false ); ui_dockwidgets["variation control"]->close(); for( QMap<QString, QAction *>::iterator it = menu_actions_container.begin(); it != menu_actions_container.end(); it++ ) { if( it.key().contains( "options:" ) ) { it.value()->setEnabled( true ); } } menu_actions_container["options:variation max iterations"]->setEnabled( false ); if( !ui_functionviewer->isFTGLFontLoaded() ) { menu_actions_container["options:show axis"]->setEnabled( false ); } ui_dockmanager->updateDockMenu(); break; case PSOModeVariation: getTimer()->stop(); ui_functionviewer->setHidden( true ); variation_graph->setVisible( true ); ui_dockwidgets["particle view"]->setEnabled( false ); ui_dockwidgets["particle view"]->close(); ui_dockwidgets["graph"]->setEnabled( false ); ui_dockwidgets["graph"]->close(); ui_dockwidgets["variation control"]->show(); addDockWidget( Qt::LeftDockWidgetArea, ui_dockwidgets["variation control"] ); for( QMap<QString, QAction *>::iterator it = menu_actions_container.begin(); it != menu_actions_container.end(); it++ ) { if( it.key().contains( "options:" ) ) { it.value()->setEnabled( false ); } } menu_actions_container["options:variation max iterations"]->setEnabled( true ); ui_dockmanager->updateDockMenu(); ui_function_options->setFunction(); break; } } void MainWindow::clearVariationGraphData() { 
variation_variables_data.clear(); variation_iterations_data.clear(); variation_fitness_data.clear(); } PSOMode MainWindow::getCurrentApplicationMode() const { return application_mode; } /** The computation of the next time step is performed in this function. Additionally the array to display the particle trace is updated in this function. If particle tracing is enabled. */ void MainWindow::computeNextStep() { bool trace_particles = ui_functionviewer->isParticleTracingEnabled(); std::vector<std::vector<Vector<double> > > &trace_particle_container = ui_functionviewer->getTraceParticleContainer(); if( swarm.getIterationStep() == 0 ) { trace_particle_container.clear(); if( trace_particles ) { if( trace_particle_container.size() != swarm.m_swarm.size() ) { trace_particle_container.clear(); trace_particle_container.resize( swarm.m_swarm.size() ); } unsigned int i = 0; Swarm<Function>::particle_container::iterator begin = swarm.m_swarm.begin(), end = swarm.m_swarm.end(); for( Swarm<Function>::particle_container::iterator it( begin ); it != end; it++, i++ ) { trace_particle_container[i].push_back( Vector<double>( ( *it )->getPosition()[0], ( *it )->getPosition()[1], ( *it )->getCurrentValue() + 0.2 ) ); } } } swarm.computeNextStep(); if( trace_particles ) { if( trace_particle_container.size() != swarm.m_swarm.size() ) { trace_particle_container.clear(); trace_particle_container.resize( swarm.m_swarm.size() ); } unsigned int i = 0; Swarm<Function>::particle_container::iterator begin = swarm.m_swarm.begin(), end = swarm.m_swarm.end(); for( Swarm<Function>::particle_container::iterator it( begin ); it != end; it++, i++ ) { trace_particle_container[i].push_back( Vector<double>( ( *it )->getPosition()[0], ( *it )->getPosition()[1], ( *it )->getCurrentValue() + 0.2 ) ); } } }<|fim▁end|>
getSwarm()->setParameterW( ui_variation_control->getFromValue() ); }
<|file_name|>scaffold10.py<|end_file_name|><|fim▁begin|># This challenge is similar to the previous one. It operates under the same # premise that you will have to replace the check_equals_ function. In this # case, however, check_equals_ is called so many times that it wouldn't make # sense to hook where each one was called. Instead, use a SimProcedure to write # your own check_equals_ implementation and then hook the check_equals_ symbol # to replace all calls to scanf with a call to your SimProcedure. # # You may be thinking: # Why can't I just use hooks? The function is called many times, but if I hook # the address of the function itself (rather than the addresses where it is # called), I can replace its behavior everywhere. Furthermore, I can get the # parameters by reading them off the stack (with memory.load(regs.esp + xx)), # and return a value by simply setting eax! Since I know the length of the # function in bytes, I can return from the hook just before the 'ret' # instruction is called, which will allow the program to jump back to where it # was before it called my hook. # If you thought that, then congratulations! You have just invented the idea of # SimProcedures! Instead of doing all of that by hand, you can let the already- # implemented SimProcedures do the boring work for you so that you can focus on # writing a replacement function in a Pythonic way. # As a bonus, SimProcedures allow you to specify custom calling conventions, but # unfortunately it is not covered in this CTF. import angr import claripy import sys def main(argv): path_to_binary = argv[1] project = angr.Project(path_to_binary) initial_state = project.factory.entry_state() # Define a class that inherits angr.SimProcedure in order to take advantage # of Angr's SimProcedures. class ReplacementCheckEquals(angr.SimProcedure): # A SimProcedure replaces a function in the binary with a simulated one # written in Python. 
Other than it being written in Python, the function # acts largely the same as any function written in C. Any parameter after # 'self' will be treated as a parameter to the function you are replacing. # The parameters will be bitvectors. Additionally, the Python can return in # the ususal Pythonic way. Angr will treat this in the same way it would # treat a native function in the binary returning. An example: # # int add_if_positive(int a, int b) { # if (a >= 0 && b >= 0) return a + b;<|fim▁hole|> # could be simulated with... # # class ReplacementAddIfPositive(angr.SimProcedure): # def run(self, a, b): # if a >= 0 and b >=0: # return a + b # else: # return 0 # # Finish the parameters to the check_equals_ function. Reminder: # int check_equals_AABBCCDDEEFFGGHH(char* to_check, int length) { ... # (!) def run(self, to_check, ...???): # We can almost copy and paste the solution from the previous challenge. # Hint: Don't look up the address! It's passed as a parameter. # (!) user_input_buffer_address = ??? user_input_buffer_length = ??? # Note the use of self.state to find the state of the system in a # SimProcedure. user_input_string = self.state.memory.load( user_input_buffer_address, user_input_buffer_length ) check_against_string = ??? # Finally, instead of setting eax, we can use a Pythonic return statement # to return the output of this function. # Hint: Look at the previous solution. return claripy.If(???, ???, ???) # Hook the check_equals symbol. Angr automatically looks up the address # associated with the symbol. Alternatively, you can use 'hook' instead # of 'hook_symbol' and specify the address of the function. To find the # correct symbol, disassemble the binary. # (!) check_equals_symbol = ??? # :string project.hook_symbol(check_equals_symbol, ReplacementCheckEquals()) simulation = project.factory.simgr(initial_state) def is_successful(state): stdout_output = state.posix.dumps(sys.stdout.fileno()) return ??? 
def should_abort(state): stdout_output = state.posix.dumps(sys.stdout.fileno()) return ??? simulation.explore(find=is_successful, avoid=should_abort) if simulation.found: solution_state = simulation.found[0] solution = ??? print solution else: raise Exception('Could not find the solution') if __name__ == '__main__': main(sys.argv)<|fim▁end|>
# else return 0; # } #
<|file_name|>compiler-worker.js<|end_file_name|><|fim▁begin|>'use strict' var solc = require('solc/wrapper') var compileJSON = function () { return '' } var missingInputs = [] module.exports = function (self) { self.addEventListener('message', function (e) { var data = e.data switch (data.cmd) { case 'loadVersion': delete self.Module // NOTE: workaround some browsers? self.Module = undefined compileJSON = null self.importScripts(data.data) var compiler = solc(self.Module) <|fim▁hole|> missingInputs.push(path) return { 'error': 'Deferred import' } }) } catch (exception) { return JSON.stringify({ error: 'Uncaught JavaScript exception:\n' + exception }) } } self.postMessage({ cmd: 'versionLoaded', data: compiler.version() }) break case 'compile': missingInputs.length = 0 self.postMessage({cmd: 'compiled', job: data.job, data: compileJSON(data.input), missingInputs: missingInputs}) break } }, false) }<|fim▁end|>
compileJSON = function (input) { try { return compiler.compileStandardWrapper(input, function (path) {
<|file_name|>test_unicode_shift_jis.py<|end_file_name|><|fim▁begin|>############################################################################### # # Tests for XlsxWriter. # # SPDX-License-Identifier: BSD-2-Clause # Copyright (c), 2013-2022, John McNamara, [email protected] # from ..excel_comparison_test import ExcelComparisonTest from ...workbook import Workbook class TestCompareXLSXFiles(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename('unicode_shift_jis.xlsx') self.set_text_file('unicode_shift_jis.txt') def test_create_file(self): """Test example file converting Unicode text.""" # Open the input file with the correct encoding. textfile = open(self.txt_filename, mode='r', encoding='shift_jis') # Create an new Excel file and convert the text data. workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() # Widen the first column to make the text clearer. worksheet.set_column('A:A', 50) # Start from the first cell. row = 0 col = 0 # Read the text file and write it to the worksheet. for line in textfile: # Ignore the comments in the sample file. if line.startswith('#'): continue # Write any other lines to the worksheet. worksheet.write(row, col, line.rstrip("\n")) row += 1 workbook.close()<|fim▁hole|><|fim▁end|>
textfile.close() self.assertExcelEqual()
<|file_name|>cross_validation_from_matrix_AUC_norm.py<|end_file_name|><|fim▁begin|>import sys, os sys.path.append(os.path.join(os.path.dirname(__file__), '..', '','')) import numpy as np #from skgraph import datasets from sklearn import svm #from skgraph.ioskgraph import * from math import sqrt import sys from sklearn.metrics import roc_auc_score #"sys.path.append('..\\..\\Multiple Kernel Learning\\Framework')" if len(sys.argv)<4: sys.exit("python cross_validation_from_matrix_norm.py inputMatrix.libsvm C outfile") c=float(sys.argv[2]) ##TODO read from libsvm format from sklearn.datasets import load_svmlight_file km, target_array = load_svmlight_file(sys.argv[1]) #print km #tolgo indice kmgood=km[:,1:].todense() gram=km[:,1:].todense() for i in xrange(len(target_array)): for j in xrange(0,len(target_array)): #AUC cross validationprint i,j,kmgood[i,j],kmgood[i,i],kmgood[j,j] gram[i,j]=kmgood[i,j]/sqrt(kmgood[i,i]*kmgood[j,j]) #print gram from sklearn import cross_validation for rs in range(42,53): f=open(str(sys.argv[3]+".seed"+str(rs)+".c"+str(c)),'w') kf = cross_validation.StratifiedKFold(target_array, n_folds=10, shuffle=True,random_state=rs) #print kf #remove column zero because #first entry of each line is the index #gram=km[:,1:].todense() f.write("Total examples "+str(len(gram))+"\n") f.write("CV\t test_AUROC\n") #print gram # normalization #for i in range(len(gram)): # for j in range(len(gram)): # gram[i,j]=gram[i,j]/sqrt(gram[i,i]+gram[j,j]) sc=[] for train_index, test_index in kf: #print("TRAIN:", train_index, "TEST:", test_index) <|fim▁hole|> clf = svm.SVC(C=c, kernel='precomputed',probability=True) train_gram = [] #[[] for x in xrange(0,len(train))] test_gram = []# [[] for x in xrange(0,len(test))] #generate train matrix and test matrix index=-1 for row in gram: index+=1 if index in train_index: train_gram.append([gram[index,i] for i in train_index]) else: test_gram.append([gram[index,i] for i in train_index]) #print gram X_train, X_test, y_train, y_test 
= np.array(train_gram), np.array(test_gram), target_array[train_index], target_array[test_index] #COMPUTE INNERKFOLD kf = cross_validation.StratifiedKFold(y_train, n_folds=10, shuffle=True,random_state=rs) inner_scores= cross_validation.cross_val_score( clf, X_train, y_train, cv=kf, scoring='roc_auc') #print "inner scores", inner_scores print "Inner AUROC: %0.4f (+/- %0.4f)" % (inner_scores.mean(), inner_scores.std() / 2) f.write(str(inner_scores.mean())+"\t") clf.fit(X_train, y_train) # predict on test examples y_test_predicted=clf.predict_proba(X_test) #print y_test_predicted sc.append(roc_auc_score(y_test, y_test_predicted[:,1])) f.write(str(roc_auc_score(y_test, y_test_predicted[:,1]))+"\n") f.close() scores=np.array(sc) print "AUROC: %0.4f (+/- %0.4f)" % (scores.mean(), scores.std() / 2)<|fim▁end|>
#generated train and test lists, incuding indices of the examples in training/test #for the specific fold. Indices starts from 0 now
<|file_name|>box_builder.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Creates CSS boxes from a DOM tree. use layout::block::BlockFlow; use layout::float::FloatFlow; use layout::box::{GenericRenderBox, GenericRenderBoxClass, ImageRenderBox, ImageRenderBoxClass}; use layout::box::{RenderBox, RenderBoxBase, RenderBoxClass, RenderBoxUtils, TextRenderBoxClass}; use layout::box::{UnscannedTextRenderBox, UnscannedTextRenderBoxClass}; use layout::context::LayoutContext; use layout::float_context::FloatType; use layout::flow::{AbsoluteFlow, BlockFlowClass, FloatFlowClass, FlowContext, FlowData}; use layout::flow::{ImmutableFlowUtils, InlineBlockFlow, InlineBlockFlowClass, InlineFlowClass}; use layout::flow::{MutableFlowUtils, TableFlow}; use layout::flow; use layout::inline::{InlineFlow}; use layout::text::TextRunScanner; use css::node_style::StyledNode; use style::computed_values::display; use style::computed_values::float; use layout::float_context::{FloatLeft, FloatRight}; use script::dom::node::{AbstractNode, CommentNodeTypeId, DoctypeNodeTypeId}; use script::dom::node::{ElementNodeTypeId, LayoutView, TextNodeTypeId, DocumentNodeTypeId}; use script::dom::node::DocumentFragmentNodeTypeId; use servo_util::range::Range; use servo_util::tree::{TreeNodeRef, TreeNode}; use std::cast; use std::cell::Cell; enum FlowType { AbsoluteFlowType, BlockFlowType, FloatFlowType(FloatType), InlineBlockFlowType, InlineFlowType, RootFlowType, TableFlowType, } pub struct LayoutTreeBuilder { next_cid: int, next_bid: int, } impl LayoutTreeBuilder { pub fn new() -> LayoutTreeBuilder { LayoutTreeBuilder { next_cid: -1, next_bid: -1, } } } // helper object for building the initial box list and making the // mapping between DOM nodes and boxes. 
struct BoxGenerator<'self> { flow: &'self mut FlowContext, range_stack: @mut ~[uint], } enum InlineSpacerSide { LogicalBefore, LogicalAfter, } impl<'self> BoxGenerator<'self> { /* Debug ids only */ fn new(flow: &'self mut FlowContext) -> BoxGenerator<'self> { debug!("Creating box generator for flow: {:s}", flow.debug_str()); BoxGenerator { flow: flow, range_stack: @mut ~[] } } fn with_clone<R>(&mut self, cb: &fn(BoxGenerator<'self>) -> R) -> R { // FIXME(pcwalton): This is a hack; it can be done safely with linearity. unsafe { let gen = BoxGenerator { flow: cast::transmute_copy(&self.flow), range_stack: self.range_stack }; cb(gen) } } /* Whether "spacer" boxes are needed to stand in for this DOM node */ fn inline_spacers_needed_for_node(_: AbstractNode<LayoutView>) -> bool { return false; } // TODO: implement this, generating spacer fn make_inline_spacer_for_node_side(_: &LayoutContext, _: AbstractNode<LayoutView>, _: InlineSpacerSide) -> Option<@RenderBox> { None } pub fn push_node(&mut self, ctx: &LayoutContext, node: AbstractNode<LayoutView>, builder: &mut LayoutTreeBuilder) { debug!("BoxGenerator[f{:d}]: pushing node: {:s}", flow::base(self.flow).id, node.debug_str()); // TODO: remove this once UA styles work let box_type = self.decide_box_type(node); debug!("BoxGenerator[f{:d}]: point a", flow::base(self.flow).id); let range_stack = &mut self.range_stack; // depending on flow, make a box for this node. match self.flow.class() { InlineFlowClass => { let inline = self.flow.as_inline(); let node_range_start = inline.boxes.len(); range_stack.push(node_range_start); // if a leaf, make a box. 
if node.is_leaf() { let new_box = BoxGenerator::make_box(ctx, box_type, node, builder); inline.boxes.push(new_box); } else if BoxGenerator::inline_spacers_needed_for_node(node) { // else, maybe make a spacer for "left" margin, border, padding let inline_spacer = BoxGenerator::make_inline_spacer_for_node_side(ctx, node, LogicalBefore); for spacer in inline_spacer.iter() { inline.boxes.push(*spacer); } } // TODO: cases for inline-block, etc. }, BlockFlowClass => { let block = self.flow.as_block(); debug!("BoxGenerator[f{:d}]: point b", block.base.id); let new_box = BoxGenerator::make_box(ctx, box_type, node, builder); debug!("BoxGenerator[f{:d}]: attaching box[b{:d}] to block flow (node: {:s})", block.base.id, new_box.base().id(), node.debug_str()); assert!(block.box.is_none()); block.box = Some(new_box); } FloatFlowClass => { let float = self.flow.as_float(); debug!("BoxGenerator[f{:d}]: point b", float.base.id); let new_box = BoxGenerator::make_box(ctx, box_type, node, builder); debug!("BoxGenerator[f{:d}]: attaching box[b{:d}] to float flow (node: {:s})", float.base.id, new_box.base().id(), node.debug_str()); assert!(float.box.is_none() && float.index.is_none()); float.box = Some(new_box); } _ => warn!("push_node() not implemented for flow f{:d}", flow::base(self.flow).id), } } pub fn pop_node(&mut self, ctx: &LayoutContext, node: AbstractNode<LayoutView>) { debug!("BoxGenerator[f{:d}]: popping node: {:s}", flow::base(self.flow).id, node.debug_str()); match self.flow.class() { InlineFlowClass => { let inline = self.flow.as_inline(); let inline = &mut *inline; if BoxGenerator::inline_spacers_needed_for_node(node) { // If this non-leaf box generates extra horizontal spacing, add a SpacerBox for // it. 
let result = BoxGenerator::make_inline_spacer_for_node_side(ctx, node, LogicalAfter); for spacer in result.iter() { let boxes = &mut inline.boxes; boxes.push(*spacer); } } let mut node_range: Range = Range::new(self.range_stack.pop(), 0); node_range.extend_to(inline.boxes.len()); if node_range.length() == 0 { warn!("node range length is zero?!") } debug!("BoxGenerator: adding element range={}", node_range); inline.elems.add_mapping(node, &node_range); }, BlockFlowClass => assert!(self.range_stack.len() == 0), FloatFlowClass => assert!(self.range_stack.len() == 0), _ => warn!("pop_node() not implemented for flow {:?}", flow::base(self.flow).id), } } /// Disambiguate between different methods here instead of inlining, since each case has very /// different complexity. fn make_box(layout_ctx: &LayoutContext, ty: RenderBoxClass, node: AbstractNode<LayoutView>, builder: &mut LayoutTreeBuilder) -> @RenderBox { let base = RenderBoxBase::new(node, builder.next_box_id()); let result = match ty { GenericRenderBoxClass => @GenericRenderBox::new(base) as @RenderBox, TextRenderBoxClass | UnscannedTextRenderBoxClass => { @UnscannedTextRenderBox::new(base) as @RenderBox } ImageRenderBoxClass => BoxGenerator::make_image_box(layout_ctx, node, base), }; debug!("BoxGenerator: created box: {:s}", result.debug_str()); result } fn make_image_box(layout_ctx: &LayoutContext, node: AbstractNode<LayoutView>, base: RenderBoxBase) -> @RenderBox { assert!(node.is_image_element()); do node.with_imm_image_element |image_element| { if image_element.image.is_some() { // FIXME(pcwalton): Don't copy URLs. let url = (*image_element.image.get_ref()).clone(); @ImageRenderBox::new(base.clone(), url, layout_ctx.image_cache) as @RenderBox } else { info!("Tried to make image box, but couldn't find image. 
Made generic box \ instead."); @GenericRenderBox::new(base.clone()) as @RenderBox } } } fn decide_box_type(&self, node: AbstractNode<LayoutView>) -> RenderBoxClass { if node.is_text() { TextRenderBoxClass } else if node.is_image_element() { do node.with_imm_image_element |image_element| { match image_element.image { Some(_) => ImageRenderBoxClass, None => GenericRenderBoxClass, } } } else if node.is_element() { GenericRenderBoxClass } else { fail!("Hey, doctypes and comments shouldn't get here! They are display:none!") } } } enum BoxGenResult<'self> { NoGenerator, ParentGenerator, SiblingGenerator, NewGenerator(BoxGenerator<'self>), /// Start a new generator, but also switch the parent out for the /// grandparent, ending the parent generator. ReparentingGenerator(BoxGenerator<'self>), Mixed(BoxGenerator<'self>, ~BoxGenResult<'self>), } /// Determines whether the result of child box construction needs to reparent /// or not. Reparenting is needed when a block flow is a child of an inline; /// in that case, we need to let the level up the stack no to end the parent /// genertor and continue with the grandparent. enum BoxConstructResult<'self> { Normal(Option<BoxGenerator<'self>>), Reparent(BoxGenerator<'self>), } impl LayoutTreeBuilder { /* Debug-only ids */ pub fn next_flow_id(&mut self) -> int { self.next_cid += 1; self.next_cid } pub fn next_box_id(&mut self) -> int { self.next_bid += 1; self.next_bid } /// Creates necessary box(es) and flow context(s) for the current DOM node, /// and recurses on its children. 
pub fn construct_recursively<'a>( &mut self, layout_ctx: &LayoutContext, cur_node: AbstractNode<LayoutView>, mut grandparent_generator: Option<BoxGenerator<'a>>, mut parent_generator: BoxGenerator<'a>, mut prev_sibling_generator: Option<BoxGenerator<'a>>) -> BoxConstructResult<'a> { debug!("Considering node: {:s}", cur_node.debug_str()); let box_gen_result = { let grandparent_gen_ref = match grandparent_generator { Some(ref mut generator) => Some(generator), None => None, }; let sibling_gen_ref = match prev_sibling_generator { Some(ref mut generator) => Some(generator), None => None, }; self.box_generator_for_node(cur_node, grandparent_gen_ref, &mut parent_generator, sibling_gen_ref) }; let mut reparent = false; debug!("result from generator_for_node: {:?}", &box_gen_result); // Skip over nodes that don't belong in the flow tree let (this_generator, next_generator) = match box_gen_result { NoGenerator => return Normal(prev_sibling_generator), ParentGenerator => { do parent_generator.with_clone |clone| { (clone, None) } } SiblingGenerator => (prev_sibling_generator.take_unwrap(), None), NewGenerator(gen) => (gen, None), ReparentingGenerator(gen) => { reparent = true; (gen, None) } Mixed(gen, next_gen) => (gen, Some(match *next_gen { ParentGenerator => { do parent_generator.with_clone |clone| { clone } } SiblingGenerator => prev_sibling_generator.take_unwrap(), _ => fail!("Unexpect BoxGenResult") })) }; let mut this_generator = this_generator; debug!("point a: {:s}", cur_node.debug_str()); this_generator.push_node(layout_ctx, cur_node, self); debug!("point b: {:s}", cur_node.debug_str()); // recurse on child nodes. 
let prev_gen_cell = Cell::new(Normal(None)); for child_node in cur_node.children() { do parent_generator.with_clone |grandparent_clone| { let grandparent_clone_cell = Cell::new(Some(grandparent_clone)); do this_generator.with_clone |parent_clone| { match prev_gen_cell.take() { Normal(prev_gen) => { let prev_generator = self.construct_recursively(layout_ctx, child_node, grandparent_clone_cell.take(), parent_clone, prev_gen); prev_gen_cell.put_back(prev_generator); } Reparent(prev_gen) => { let prev_generator = self.construct_recursively(layout_ctx, child_node, None, grandparent_clone_cell.take().unwrap(), Some(prev_gen)); prev_gen_cell.put_back(prev_generator); } } } } } this_generator.pop_node(layout_ctx, cur_node); self.simplify_children_of_flow(layout_ctx, this_generator.flow); match next_generator { Some(n_gen) => Normal(Some(n_gen)), None => { if reparent { Reparent(this_generator) } else { Normal(Some(this_generator)) } } } } pub fn box_generator_for_node<'a>(&mut self, node: AbstractNode<LayoutView>, grandparent_generator: Option<&mut BoxGenerator<'a>>, parent_generator: &mut BoxGenerator<'a>, mut sibling_generator: Option<&mut BoxGenerator<'a>>) -> BoxGenResult<'a> { let display = match node.type_id() { ElementNodeTypeId(_) => match node.style().Box.display { display::none => return NoGenerator, display => display, }, TextNodeTypeId => display::inline, DocumentNodeTypeId(_) | DoctypeNodeTypeId | DocumentFragmentNodeTypeId | CommentNodeTypeId => return NoGenerator, }; // FIXME(pcwalton): Unsafe. 
let sibling_flow: Option<&mut FlowContext> = sibling_generator.as_mut().map(|gen| { unsafe { cast::transmute_copy(&gen.flow) } }); let is_float = if (node.is_element()) { match node.style().Box.float { float::none => None, float::left => Some(FloatLeft), float::right => Some(FloatRight) } } else { None }; let sibling_flow_class = match sibling_flow { None => None, Some(flow) => Some(flow.class()), }; let new_generator = match (display, parent_generator.flow.class(), sibling_flow_class) { // Floats (display::block, BlockFlowClass, _) | (display::block, FloatFlowClass, _) if is_float.is_some() => { self.create_child_generator(node, parent_generator, FloatFlowType(is_float.unwrap())) } // If we're placing a float after an inline, append the float to the inline flow, // then continue building from the inline flow in case there are more inlines // afterward. (display::block, _, Some(InlineFlowClass)) if is_float.is_some() => { let float_type = FloatFlowType(is_float.unwrap()); let float_generator = self.create_child_generator(node, sibling_generator.unwrap(), float_type); return Mixed(float_generator, ~SiblingGenerator); } // This is a catch-all case for when: // a) sibling_flow is None // b) sibling_flow is a BlockFlow (display::block, InlineFlowClass, _) if is_float.is_some() => { self.create_child_generator(node, parent_generator, FloatFlowType(is_float.unwrap())) } (display::block, BlockFlowClass, _) => { match (parent_generator.flow.as_block().is_root, node.parent_node()) { // If this is the root node, then use the root flow's // context. Otherwise, make a child block context. 
(true, Some(parent)) if !parent.is_document() => { self.create_child_generator(node, parent_generator, BlockFlowType) } (true, None) | (true, Some(_)) => return ParentGenerator, (false, _) => { self.create_child_generator(node, parent_generator, BlockFlowType) } } } (display::block, FloatFlowClass, _) => { self.create_child_generator(node, parent_generator, BlockFlowType) } // Inlines that are children of inlines are part of the same flow (display::inline, InlineFlowClass, _) => return ParentGenerator, (display::inline_block, InlineFlowClass, _) => return ParentGenerator, // Inlines that are children of blocks create new flows if their // previous sibling was a block. (display::inline, BlockFlowClass, Some(BlockFlowClass)) | (display::inline_block, BlockFlowClass, Some(BlockFlowClass)) => { self.create_child_generator(node, parent_generator, InlineFlowType) } // The first two cases should only be hit when a FloatFlow // is the first child of a BlockFlow. Other times, we will (display::inline, _, Some(FloatFlowClass)) | (display::inline_block, _, Some(FloatFlowClass)) | (display::inline, FloatFlowClass, _) | (display::inline_block, FloatFlowClass, _) => { self.create_child_generator(node, parent_generator, InlineFlowType) } // Inlines whose previous sibling was not a block try to use their // sibling's flow context. (display::inline, BlockFlowClass, _) | (display::inline_block, BlockFlowClass, _) => { return match sibling_generator { None => NewGenerator(self.create_child_generator(node, parent_generator, InlineFlowType)), Some(*) => SiblingGenerator } } // blocks that are children of inlines need to split their parent // flows. 
(display::block, InlineFlowClass, _) => { match grandparent_generator { None => fail!("expected to have a grandparent block flow"), Some(grandparent_gen) => { assert!(grandparent_gen.flow.is_block_like());<|fim▁hole|> return ReparentingGenerator(block_gen); } } } _ => return ParentGenerator }; NewGenerator(new_generator) } pub fn create_child_generator<'a>( &mut self, node: AbstractNode<LayoutView>, parent_generator: &mut BoxGenerator<'a>, ty: FlowType) -> BoxGenerator<'a> { let new_flow = self.make_flow(ty, node); parent_generator.flow.add_new_child(new_flow); let flow_ref = flow::last_child(parent_generator.flow).unwrap(); BoxGenerator::new(*flow_ref) } /// Fix up any irregularities such as: /// /// * split inlines (CSS 2.1 Section 9.2.1.1) /// * elide non-preformatted whitespace-only text boxes and their flows (CSS 2.1 Section /// 9.2.2.1). /// /// The latter can only be done immediately adjacent to, or at the beginning or end of a block /// flow. Otherwise, the whitespace might affect whitespace collapsing with adjacent text. 
pub fn simplify_children_of_flow(&self, ctx: &LayoutContext, parent_flow: &mut FlowContext) { match parent_flow.class() { InlineFlowClass => { let mut found_child_inline = false; let mut found_child_block = false; for child_ctx in flow::child_iter(parent_flow) { match child_ctx.class() { InlineFlowClass | InlineBlockFlowClass => found_child_inline = true, BlockFlowClass => found_child_block = true, _ => {} } } if found_child_block && found_child_inline { self.fixup_split_inline(parent_flow) } } BlockFlowClass | FloatFlowClass => { // check first/last child for whitespace-ness let mut do_remove = false; let p_id = flow::base(parent_flow).id; do parent_flow.with_first_child |mut first_child| { for first_flow in first_child.mut_iter() { if first_flow.starts_inline_flow() { // FIXME: workaround for rust#6393 { let first_inline_flow = first_flow.as_inline(); let boxes = &first_inline_flow.boxes; if boxes.len() == 1 { let first_box = boxes[0]; // FIXME(pcwalton): Rust bug if first_box.is_whitespace_only() { debug!("LayoutTreeBuilder: pruning whitespace-only first \ child flow f{:d} from parent f{:d}", first_inline_flow.base.id, p_id); do_remove = true; } } } } } } if (do_remove) { parent_flow.remove_first(); } do_remove = false; let p_id = flow::base(parent_flow).id; do parent_flow.with_last_child |mut last_child| { for last_flow in last_child.mut_iter() { if last_flow.starts_inline_flow() { // FIXME: workaround for rust#6393 { let last_inline_flow = last_flow.as_inline(); let boxes = &last_inline_flow.boxes; if boxes.len() == 1 && boxes.last().is_whitespace_only() { let last_box = boxes.last(); // FIXME(pcwalton): Rust bug if last_box.is_whitespace_only() { debug!("LayoutTreeBuilder: pruning whitespace-only last \ child flow f{:d} from parent f{:d}", last_inline_flow.base.id, p_id); do_remove = true; } } } } } } if (do_remove) { parent_flow.remove_last(); } // Issue 543: We only need to do this if there are inline child // flows, but there's not a quick way to check at 
the moment. for child_flow in flow::child_iter(parent_flow) { match child_flow.class() { InlineFlowClass | InlineBlockFlowClass => { let mut scanner = TextRunScanner::new(); scanner.scan_for_runs(ctx, *child_flow); } _ => {} } } } _ => {} } } pub fn fixup_split_inline(&self, _: &mut FlowContext) { // TODO: finish me. fail!(~"TODO: handle case where an inline is split by a block") } /// Entry point for box creation. Should only be called on the root DOM element. pub fn construct_trees(&mut self, layout_ctx: &LayoutContext, root: AbstractNode<LayoutView>) -> Result<~FlowContext:, ()> { debug!("Constructing flow tree for DOM: "); debug!("{:?}", root.dump()); let mut new_flow = self.make_flow(RootFlowType, root); { let new_generator = BoxGenerator::new(new_flow); self.construct_recursively(layout_ctx, root, None, new_generator, None); } return Ok(new_flow) } /// Creates a flow of the given type for the supplied node. pub fn make_flow(&mut self, flow_type: FlowType, node: AbstractNode<LayoutView>) -> ~FlowContext: { let info = FlowData::new(self.next_flow_id(), node); let result = match flow_type { AbsoluteFlowType => ~AbsoluteFlow::new(info) as ~FlowContext:, BlockFlowType => ~BlockFlow::new(info) as ~FlowContext:, FloatFlowType(f_type) => ~FloatFlow::new(info, f_type) as ~FlowContext:, InlineBlockFlowType => ~InlineBlockFlow::new(info) as ~FlowContext:, InlineFlowType => ~InlineFlow::new(info) as ~FlowContext:, RootFlowType => ~BlockFlow::new_root(info) as ~FlowContext:, TableFlowType => ~TableFlow::new(info) as ~FlowContext:, }; debug!("LayoutTreeBuilder: created flow: {:s}", result.debug_str()); result } }<|fim▁end|>
let block_gen = self.create_child_generator(node, grandparent_gen, BlockFlowType);
<|file_name|>ForgotPasswordModule.ts<|end_file_name|><|fim▁begin|>import * as angular from 'angular'; import { ForgotPasswordComponent } from './ForgotPasswordComponent'; import { ForgotPasswordController } from './ForgotPasswordController'; export default angular .module('users.forgotPassword', [])<|fim▁hole|> .controller(ForgotPasswordController.$name, ForgotPasswordController) .name;<|fim▁end|>
.component(ForgotPasswordComponent.$name, new ForgotPasswordComponent())
<|file_name|>haiku_gen.py<|end_file_name|><|fim▁begin|>from keras.models import Sequential from keras.layers.core import Dense, Activation, Dropout from keras.layers.recurrent import LSTM from keras.preprocessing.sequence import pad_sequences import numpy as np import random, sys ''' Example script to generate haiku Text. It is recommended to run this script on GPU, as recurrent networks are quite computationally intensive. If you try this script on new data, make sure your corpus has at least ~100k characters. ~1M is better. ''' path = "haiku_all.txt" text = open(path).read().lower() print('corpus length:', len(text)) chars = set(text) print('total chars:', len(chars)) char_indices = dict((c, i) for i, c in enumerate(chars)) indices_char = dict((i, c) for i, c in enumerate(chars)) # cut the text in semi-redundant sequences of maxlen characters maxlen = 100 step = 3 sentences = [] next_chars = [] for i in range(0, len(text) - maxlen, step): sentences.append(text[i : i + maxlen]) next_chars.append(text[i + maxlen]) print('nb sequences:', len(sentences)) print('Vectorization...') X = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool) y = np.zeros((len(sentences), len(chars)), dtype=np.bool) for i, sentence in enumerate(sentences): for t, char in enumerate(sentence):<|fim▁hole|> print "X.shape: %s, Y.shape: %s" % (X.shape, y.shape) # build the model: 2 stacked LSTM print('Build model...') model = Sequential() model.add(LSTM(len(chars), 512, return_sequences=False)) model.add(Dropout(0.2)) ## Remove above 2 lines and replace by below 2 lines to make 2 layers LSTM. 
#model.add(LSTM(len(chars), 512, return_sequences=True)) #model.add(Dropout(0.2)) #model.add(LSTM(512, 512, return_sequences=False)) #model.add(Dropout(0.2)) model.add(Dense(512, len(chars))) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop') # helper function to sample an index from a probability array def sample(a, temperature=1.0): a = np.log(a)/temperature a = np.exp(a)/np.sum(np.exp(a)) return np.argmax(np.random.multinomial(1,a,1)) # train the model, output generated text after each iteration def generate_from_model(model, begin_sent=None, diversity_l=[0.2, 0.5, 1.0, 1.2]): if begin_sent is None: start_index = random.randint(0, len(text) - maxlen - 1) for diversity in diversity_l: print print '----- diversity:', diversity generated = '' if begin_sent is None: sentence = text[start_index : start_index + maxlen] else: sentence = begin_sent generated += sentence print '----- Generating with seed: "' + sentence + '"' sys.stdout.write(generated) tot_lines = 0 tot_chars = 0 while True: if tot_lines > 3 or tot_chars > 120: break x = np.zeros((1, maxlen, len(chars))) for t, char in enumerate(sentence): x[0, t, char_indices[char]] = 1. preds = model.predict(x, verbose=0)[0] next_index = sample(preds, diversity) next_char = indices_char[next_index] tot_chars += 1 generated += next_char if next_char == '\t': tot_lines += 1 sentence = sentence[1:] + next_char sys.stdout.write(next_char) sys.stdout.flush() print "" if __name__ == "__main__": history = model.fit(X, y, batch_size=200, nb_epoch=20) generate_from_model(model) """ for i in xrange(1,4): history = model.fit(X, y, batch_size=100*i, nb_epoch=20) generate_from_model(model) """<|fim▁end|>
X[i, t, char_indices[char]] = 1 y[i, char_indices[next_chars[i]]] = 1
<|file_name|>udp-adapter.js<|end_file_name|><|fim▁begin|>'use strict' // create a net-peer compatible object based on a UDP datagram socket module.exports = function udpAdapter(udpSocket, udpDestinationHost, udpDestinationPort) { const _listeners = [] udpSocket.on('message', (msg, rinfo) => { //console.log(`server got: ${msg} from ${rinfo.address}:${rinfo.port}`)<|fim▁hole|> _listeners[i](msg) } }) let on = function(event, fn) { if (event === 'data') { _listeners.push(fn) } } let send = function(message) { udpSocket.send(Buffer.from(message.buffer), udpDestinationPort, udpDestinationHost, (err) => { }) } return Object.freeze({ on, send }) }<|fim▁end|>
for(let i=0; i < _listeners.length; i++) {
<|file_name|>iptorrents.py<|end_file_name|><|fim▁begin|># Author: seedboy # URL: https://github.com/seedboy # # This file is part of SickRage. # # SickRage is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # SickRage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with SickRage. If not, see <http://www.gnu.org/licenses/>. import re import traceback import datetime import urlparse import sickbeard import generic from sickbeard.common import Quality from sickbeard import logger from sickbeard import tvcache from sickbeard import db from sickbeard import classes from sickbeard import helpers from sickbeard import show_name_helpers from sickbeard.exceptions import ex, AuthException from sickbeard import clients from lib import requests from lib.requests import exceptions from sickbeard.bs4_parser import BS4Parser from lib.unidecode import unidecode from sickbeard.helpers import sanitizeSceneName from sickbeard.show_name_helpers import allPossibleShowNames class IPTorrentsProvider(generic.TorrentProvider): def __init__(self): generic.TorrentProvider.__init__(self, "IPTorrents") self.supportsBacklog = True self.enabled = False self.username = None self.password = None self.ratio = None self.freeleech = False self.cache = IPTorrentsCache(self) self.urls = {'base_url': 'https://www.iptorrents.com', 'login': 'https://www.iptorrents.com/torrents/', 'search': 'https://www.iptorrents.com/torrents/?%s%s&q=%s&qf=ti', } self.url = self.urls['base_url'] self.categorie = 'l73=1&l78=1&l66=1&l65=1&l79=1&l5=1&l4=1' def isEnabled(self): return 
self.enabled def imageName(self): return 'iptorrents.png' def getQuality(self, item, anime=False): quality = Quality.sceneQuality(item[0], anime) return quality def _checkAuth(self): if not self.username or not self.password: raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.") return True def _doLogin(self): login_params = {'username': self.username, 'password': self.password, 'login': 'submit', } try: response = self.session.post(self.urls['login'], data=login_params, timeout=30, verify=False) except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e: logger.log(u'Unable to connect to ' + self.name + ' provider: ' + ex(e), logger.ERROR) return False if re.search('tries left', response.text) \ or re.search('<title>IPT</title>', response.text) \ or response.status_code == 401: logger.log(u'Invalid username or password for ' + self.name + ', Check your settings!', logger.ERROR) return False return True def _get_season_search_strings(self, ep_obj): search_string = {'Season': []} for show_name in set(show_name_helpers.allPossibleShowNames(self.show)): if ep_obj.show.air_by_date or ep_obj.show.sports: ep_string = show_name + ' ' + str(ep_obj.airdate).split('-')[0] elif ep_obj.show.anime: ep_string = show_name + ' ' + "%d" % ep_obj.scene_absolute_number else: ep_string = show_name + ' S%02d' % int(ep_obj.scene_season) #1) showName SXX search_string['Season'].append(ep_string) return [search_string] def _get_episode_search_strings(self, ep_obj, add_string=''): search_string = {'Episode': []} if not ep_obj: return [] if self.show.air_by_date: for show_name in set(allPossibleShowNames(self.show)): ep_string = sanitizeSceneName(show_name) + ' ' + \ str(ep_obj.airdate).replace('-', '|') search_string['Episode'].append(ep_string) elif self.show.sports: for show_name in set(allPossibleShowNames(self.show)): ep_string = sanitizeSceneName(show_name) + ' ' + \ str(ep_obj.airdate).replace('-', '|') + '|' 
+ \ ep_obj.airdate.strftime('%b') search_string['Episode'].append(ep_string) elif self.show.anime:<|fim▁hole|> "%i" % int(ep_obj.scene_absolute_number) search_string['Episode'].append(ep_string) else: for show_name in set(show_name_helpers.allPossibleShowNames(self.show)): ep_string = show_name_helpers.sanitizeSceneName(show_name) + ' ' + \ sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season, 'episodenumber': ep_obj.scene_episode} + ' %s' % add_string search_string['Episode'].append(re.sub('\s+', ' ', ep_string)) return [search_string] def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0): results = [] items = {'Season': [], 'Episode': [], 'RSS': []} freeleech = '&free=on' if self.freeleech else '' if not self._doLogin(): return results for mode in search_params.keys(): for search_string in search_params[mode]: if isinstance(search_string, unicode): search_string = unidecode(search_string) # URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile searchURL = self.urls['search'] % (self.categorie, freeleech, search_string) searchURL += ';o=seeders' if mode != 'RSS' else '' logger.log(u"" + self.name + " search page URL: " + searchURL, logger.DEBUG) data = self.getURL(searchURL) if not data: continue try: data = re.sub(r'(?im)<button.+?<[\/]button>', '', data, 0) with BS4Parser(data, features=["html5lib", "permissive"]) as html: if not html: logger.log(u"Invalid HTML data: " + str(data), logger.DEBUG) continue if html.find(text='No Torrents Found!'): logger.log(u"No results found for: " + search_string + " (" + searchURL + ")", logger.DEBUG) continue torrent_table = html.find('table', attrs={'class': 'torrents'}) torrents = torrent_table.find_all('tr') if torrent_table else [] #Continue only if one Release is found if len(torrents) < 2: logger.log(u"The Data returned from " + self.name + " do not contains any torrent", logger.WARNING) continue for result in torrents[1:]: try: torrent = 
result.find_all('td')[1].find('a') torrent_name = torrent.string torrent_download_url = self.urls['base_url'] + (result.find_all('td')[3].find('a'))['href'] torrent_details_url = self.urls['base_url'] + torrent['href'] torrent_seeders = int(result.find('td', attrs={'class': 'ac t_seeders'}).string) ## Not used, perhaps in the future ## #torrent_id = int(torrent['href'].replace('/details.php?id=', '')) #torrent_leechers = int(result.find('td', attrs = {'class' : 'ac t_leechers'}).string) except (AttributeError, TypeError): continue # Filter unseeded torrent and torrents with no name/url if mode != 'RSS' and torrent_seeders == 0: continue if not torrent_name or not torrent_download_url: continue item = torrent_name, torrent_download_url logger.log(u"Found result: " + torrent_name + " (" + torrent_details_url + ")", logger.DEBUG) items[mode].append(item) except Exception, e: logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR) results += items[mode] return results def _get_title_and_url(self, item): title, url = item if title: title = u'' + title title = title.replace(' ', '.') if url: url = str(url).replace('&amp;', '&') return (title, url) def findPropers(self, search_date=datetime.datetime.today()): results = [] myDB = db.DBConnection() sqlResults = myDB.select( 'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' + ' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' + ' WHERE e.airdate >= ' + str(search_date.toordinal()) + ' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' + ' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))' ) if not sqlResults: return [] for sqlshow in sqlResults: self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"])) if self.show: curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"])) searchString = self._get_episode_search_strings(curEp, 
add_string='PROPER|REPACK') for item in self._doSearch(searchString[0]): title, url = self._get_title_and_url(item) results.append(classes.Proper(title, url, datetime.datetime.today(), self.show)) return results def seedRatio(self): return self.ratio class IPTorrentsCache(tvcache.TVCache): def __init__(self, provider): tvcache.TVCache.__init__(self, provider) # Only poll IPTorrents every 10 minutes max self.minTime = 10 def _getRSSData(self): search_params = {'RSS': ['']} return {'entries': self.provider._doSearch(search_params)} provider = IPTorrentsProvider()<|fim▁end|>
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)): ep_string = sanitizeSceneName(show_name) + ' ' + \
<|file_name|>DummyHashServiceImpl.java<|end_file_name|><|fim▁begin|>/** * Copyright 2013 Agustín Miura <"[email protected]"> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ar.com.imperium.common.security; import org.springframework.stereotype.Component; @Component("dummyHashService") public class DummyHashServiceImpl implements IHashService { @Override public String hashString(String input) throws Exception<|fim▁hole|> } }<|fim▁end|>
{ return input;
<|file_name|>meteodata.cpp<|end_file_name|><|fim▁begin|>#include "meteodata.h" #include "meteojour.h" MeteoData::MeteoData(QString v,QObject *parent):QObject(parent),_ville(v){ _mesure = "metric"; } /* author : Fontaine pierre mail : [email protected] but : afficher message d'erreur dans la console remarque: precond : postcond: ©2017 */ void MeteoData::onError(){ qDebug()<<"Erreur de requete"; } /* author : Fontaine pierre mail : [email protected] but : effectuer une requete pour recupérer données JSON de l'API remarque: precond : postcond: ©2017 */ void MeteoData::requete(){ QNetworkAccessManager *manager = new QNetworkAccessManager(this); qDebug()<<"Exe Req"; //execution d'une requete //callback du storage a la reception de la requete qDebug()<<"Creation connexion pour succès ou echec"; connect(manager,SIGNAL(finished(QNetworkReply*)), this,SLOT(storeReplyInObj(QNetworkReply*))); manager->get(QNetworkRequest(QUrl("http://api.openweathermap.org/data/2.5/forecast/daily?q="+_ville+"&appid=9a5b3401d0ae43c0fdd643de1a05660c&units="+_mesure+"&cnt=5"))); } /* author : Fontaine pierre mail : [email protected] but : Stocker la réponse de l'API dans un QJsonObject remarque: precond : postcond: ©2017 */ void MeteoData::storeReplyInObj(QNetworkReply* r){ qDebug()<<"CallBack"; if(r->error() == QNetworkReply::NoError){ QByteArray bts = r->readAll(); QString str(bts); QJsonDocument doc = QJsonDocument::fromJson(str.toUtf8()); if(!doc.isNull()){ if(doc.isObject()){ obj = doc.object(); parseObj(); }else{ qDebug()<<"le doc n'est pas un objet"; } } else { qDebug() << "JSON FORMAT INVALIDE"; } //qDebug()<<bts; }else{ qDebug()<<r->errorString(); } } /* author : Fontaine pierre mail : [email protected] but : parser le QJsonDocument pour récupérer les données remarque: precond : postcond: ©2017 */ void MeteoData::parseObj(){ qDebug() << obj.keys();//("city", "cnt", "cod", "list", "message") QJsonArray list = obj.value("list").toArray(); for(int i = 0; i < 5;i ++){ //qDebug() << 
list.at(i); QJsonObject jData = list.at(i).toObject(); //qDebug() << jData.keys();//("clouds", "deg", "dt", "humidity", "pressure", "rain", "speed", "temp", "weather") QJsonObject jTemp = jData.value("temp").toObject(); _coeffNuage = jData.value("clouds").toDouble(); _coeffPluie = jData.value("rain").toDouble(); _pressure = jData.value("pressure").toDouble(); _humidity = jData.value("humidity").toDouble(); _tempMin = jTemp.value("min").toDouble(); _tempMax = jTemp.value("max").toDouble(); _temp = jTemp.value("day").toDouble(); emit dataChanged(i); } } /* author : Fontaine pierre mail : [email protected] but : getter humidité remarque: precond : postcond: ©2017 */ double MeteoData::getHumidity()const{ return _humidity; } /* author : Fontaine pierre mail : [email protected] but : getter pression remarque: precond : postcond: ©2017 */ double MeteoData::getPressure()const{ return _pressure; } /* author : Fontaine pierre mail : [email protected] but : getter temp min remarque: precond : postcond: ©2017 */ double MeteoData::getTempMin()const{ return _tempMin; } /* author : Fontaine pierre mail : [email protected] but : getter temp max remarque: precond : postcond: ©2017 */ double MeteoData::getTempMax()const{ return _tempMax; } /* author : Fontaine pierre mail : [email protected] but : getter temp actuelle remarque: precond : postcond: ©2017 */ double MeteoData::getTemp()const{ return _temp; } /* author : Fontaine pierre mail : [email protected] but : setter humidité remarque: precond : postcond: ©2017 */ void MeteoData::setHumidity(double h){ _humidity = h; } /* author : Fontaine pierre mail : [email protected] but : setter pression remarque: precond : postcond: ©2017 */ void MeteoData::setPressure(double p){ _pressure = p; } /* author : Fontaine pierre mail : [email protected] but : setter temp min remarque: precond : postcond: ©2017 */ void MeteoData::setTempMin(double t){ _tempMin = t; } /* author : Fontaine pierre mail : [email protected] but : setter temp max 
remarque: precond : postcond: ©2017 */ void MeteoData::setTempMax(double t){ _tempMax = t; } /* author : Fontaine pierre mail : [email protected] but : setter temp actuelle remarque: precond : postcond: ©2017 */ void MeteoData::setTemp(double t){ _temp = t; } /* author : Fontaine pierre mail : [email protected] but : getter ville actuelle remarque: precond : postcond: ©2017 */ QString MeteoData::getVille()const{ return _ville; } /* author : Fontaine pierre mail : [email protected] but : setter ville actuelle remarque: precond : postcond: ©2017 */ void MeteoData::setVille(QString v){ _ville = v; } /* author : Fontaine pierre mail : [email protected] but : getter coeff nuage remarque: precond : postcond: ©2017 */ double MeteoData::getCoeffNuage()const{ return _coeffNuage; } /* author : Fontaine pierre mail : [email protected] but : getter coeff pluie remarque:<|fim▁hole|> precond : postcond: ©2017 */ double MeteoData::getCoeffPluie()const{ return _coeffPluie; } /* author : Fontaine pierre mail : [email protected] but : setter coeff nuage remarque: precond : postcond: ©2017 */ void MeteoData::setCoeffNuage(double c){ _coeffNuage = c; } /* author : Fontaine pierre mail : [email protected] but : setter coeff pluie remarque: precond : postcond: ©2017 */ void MeteoData::setCoeffPluie(double c){ _coeffPluie = c; } /* author : Fontaine pierre mail : [email protected] but : SLOT ré effectuer une requete après une action remarque: precond : postcond: ©2017 */ void MeteoData::reqAgain(){ requete(); } /* author : Fontaine pierre mail : [email protected] but : setter mesure remarque: choix entre "metric,impérial,default" precond : postcond: ©2017 */ void MeteoData::setMesure(QString s){ _mesure = s; }<|fim▁end|>
<|file_name|>hwbuttondialog.cpp<|end_file_name|><|fim▁begin|>/* * This file is part of the KDE wacomtablet project. For copyright * information and license terms see the AUTHORS and COPYING files * in the top-level directory of this distribution. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "hwbuttondialog.h" #include "ui_hwbuttondialog.h" #include <QMouseEvent> #include <QPushButton> static unsigned int QtButton2X11Button(Qt::MouseButton qtbutton) { // We could probably just use log2 here, but I don't know if this can backfire // Qt seems to offer no function for getting index of a set flag unsigned int button = qtbutton; unsigned int buttonNumber = 0; while (button > 0) { buttonNumber++; button >>= 1; } if (buttonNumber < 4) { return buttonNumber; } else { // X11 buttons 4-7 are reserved for scroll wheel return buttonNumber + 4; } } using namespace Wacom; HWButtonDialog::HWButtonDialog(int maxButtons, QWidget *parent) : QDialog(parent) , ui(new Ui::HWButtonDialog) , m_maxButtons(maxButtons) , m_nextButton(1) { ui->setupUi(this); ui->buttonBox->button(QDialogButtonBox::Ok)->setEnabled(false); nextButton(); } HWButtonDialog::~HWButtonDialog() { delete ui; } void HWButtonDialog::nextButton() { if(m_nextButton <= m_maxButtons) { QString text = i18n("Please press Button %1 on the tablet now.", m_nextButton); text.prepend(QLatin1String("<b>")); text.append(QLatin1String("</b>")); 
ui->textEdit->insertHtml(text); ui->textEdit->insertHtml(QLatin1String("<br>")); QTextCursor cursor = ui->textEdit->textCursor(); cursor.movePosition(QTextCursor::End, QTextCursor::MoveAnchor); ui->textEdit->setTextCursor(cursor); } } <|fim▁hole|>{ if (event->button() == Qt::MouseButton::NoButton) { return; } hwKey(QtButton2X11Button(event->button())); } void HWButtonDialog::hwKey(unsigned int button) { if(m_nextButton <= m_maxButtons) { QString text = i18n("Hardware button %1 detected.", button); ui->textEdit->insertHtml(text); ui->textEdit->insertHtml(QLatin1String("<br><br>")); QTextCursor cursor = ui->textEdit->textCursor(); cursor.movePosition(QTextCursor::End, QTextCursor::MoveAnchor); ui->textEdit->setTextCursor(cursor); m_buttonMap << button; m_nextButton++; } if(m_nextButton > m_maxButtons) { ui->textEdit->insertHtml(i18n("All buttons detected. Please close dialog")); ui->textEdit->insertHtml(QLatin1String("<br>")); QTextCursor cursor = ui->textEdit->textCursor(); cursor.movePosition(QTextCursor::End, QTextCursor::MoveAnchor); ui->textEdit->setTextCursor(cursor); ui->buttonBox->button(QDialogButtonBox::Ok)->setEnabled(true); } else { nextButton(); } } QList<unsigned int> HWButtonDialog::buttonMap() const { return m_buttonMap; }<|fim▁end|>
void HWButtonDialog::mousePressEvent(QMouseEvent *event)
<|file_name|>InitGui.py<|end_file_name|><|fim▁begin|>class DrawingDimensioningWorkbench (Workbench): # Icon generated using by converting linearDimension.svg to xpm format using Gimp Icon = '''<|fim▁hole|>" c None", ". c #000000", "+ c #0008FF", "@ c #0009FF", "# c #000AFF", "$ c #00023D", "% c #0008F7", "& c #0008EE", "* c #000587", "= c #000001", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". +@@ + .", ". @+@@+ +@@+@ .", ". +@+@@@@@@ @@@@@@@# .", "$%@@@@@@@@@+@@@@@@@@@@@@@@@@@@&$", ". #@@@@@@@@ #+@@@@@@@@*=", ". @+@@+ +@@@@@ .", ". +@ #@++ .", ". # .", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". ."}; ''' MenuText = 'Drawing Dimensioning' def Initialize(self): import importlib, os from dimensioning import __dir__, debugPrint, iconPath import linearDimension import linearDimension_stack import deleteDimension import circularDimension import grabPointAdd import textAdd import textEdit import textMove import escapeDimensioning import angularDimension import radiusDimension import centerLines import noteCircle import toleranceAdd commandslist = [ 'dd_linearDimension', #where dd is short-hand for drawing dimensioning 'dd_linearDimensionStack', 'dd_circularDimension', 'dd_radiusDimension', 'dd_angularDimension', 'dd_centerLines', 'dd_centerLine', 'dd_noteCircle', 'dd_grabPoint', 'dd_addText', 'dd_editText', 'dd_moveText', 'dd_addTolerance', 'dd_deleteDimension', 'dd_escapeDimensioning', ] self.appendToolbar('Drawing Dimensioning', commandslist) import unfold import unfold_bending_note import unfold_export_to_dxf unfold_cmds = [ 'dd_unfold', 'dd_bendingNote', ] if hasattr(os,'uname') and os.uname()[0] == 'Linux' : #this command only works on Linux systems unfold_cmds.append('dd_exportToDxf') self.appendToolbar( 'Drawing Dimensioning Folding', unfold_cmds ) import weldingSymbols if int( FreeCAD.Version()[1] > 15 ) and int( FreeCAD.Version()[2].split()[0] ) > 5165: weldingCommandList = 
['dd_weldingGroupCommand'] else: weldingCommandList = weldingSymbols.weldingCmds self.appendToolbar('Drawing Dimensioning Welding Symbols', weldingCommandList) self.appendToolbar('Drawing Dimensioning Help', [ 'dd_help' ]) FreeCADGui.addIconPath(iconPath) FreeCADGui.addPreferencePage( os.path.join( __dir__, 'Resources', 'ui', 'drawing_dimensioing_prefs-base.ui'),'Drawing Dimensioning' ) Gui.addWorkbench(DrawingDimensioningWorkbench())<|fim▁end|>
/* XPM */ static char * linearDimension_xpm[] = { "32 32 10 1",
<|file_name|>Helios-Debugger.js<|end_file_name|><|fim▁begin|>define("helios/Helios-Debugger", ["amber/boot", "amber_core/Kernel-Objects", "helios/Helios-Core", "helios/Helios-Workspace"], function($boot){ var smalltalk=$boot.vm,nil=$boot.nil,_st=$boot.asReceiver,globals=$boot.globals; smalltalk.addPackage('Helios-Debugger'); smalltalk.packages["Helios-Debugger"].transport = {"type":"amd","amdNamespace":"helios"}; smalltalk.addClass('HLContextInspectorDecorator', globals.Object, ['context'], 'Helios-Debugger'); smalltalk.addMethod( smalltalk.method({ selector: "context", protocol: 'accessing', fn: function (){ var self=this; var $1; $1=self["@context"]; return $1; }, args: [], source: "context\x0a\x09^ context", messageSends: [], referencedClasses: [] }), globals.HLContextInspectorDecorator); smalltalk.addMethod( smalltalk.method({ selector: "evaluate:on:", protocol: 'evaluating', fn: function (aString,anEvaluator){ var self=this; return smalltalk.withContext(function($ctx1) { var $1; $1=_st(self._context())._evaluate_on_(aString,anEvaluator); return $1; }, function($ctx1) {$ctx1.fill(self,"evaluate:on:",{aString:aString,anEvaluator:anEvaluator},globals.HLContextInspectorDecorator)})}, args: ["aString", "anEvaluator"], source: "evaluate: aString on: anEvaluator\x0a\x09^ self context evaluate: aString on: anEvaluator", messageSends: ["evaluate:on:", "context"], referencedClasses: [] }), globals.HLContextInspectorDecorator); smalltalk.addMethod( smalltalk.method({ selector: "initializeFromContext:", protocol: 'initialization', fn: function (aContext){ var self=this; self["@context"]=aContext; return self}, args: ["aContext"], source: "initializeFromContext: aContext\x0a\x09context := aContext", messageSends: [], referencedClasses: [] }), globals.HLContextInspectorDecorator); smalltalk.addMethod( smalltalk.method({ selector: "inspectOn:", protocol: 'inspecting', fn: function (anInspector){ var self=this; var variables,inspectedContext; function $Dictionary(){return 
globals.Dictionary||(typeof Dictionary=="undefined"?nil:Dictionary)} return smalltalk.withContext(function($ctx1) { var $1,$2,$3,$4,$receiver; variables=_st($Dictionary())._new(); inspectedContext=self._context(); $1=variables; $2=_st(inspectedContext)._locals(); $ctx1.sendIdx["locals"]=1; _st($1)._addAll_($2); $ctx1.sendIdx["addAll:"]=1; _st((function(){ return smalltalk.withContext(function($ctx2) { return _st(_st(inspectedContext)._notNil())._and_((function(){ return smalltalk.withContext(function($ctx3) { return _st(inspectedContext)._isBlockContext(); }, function($ctx3) {$ctx3.fillBlock({},$ctx2,2)})})); }, function($ctx2) {$ctx2.fillBlock({},$ctx1,1)})}))._whileTrue_((function(){ return smalltalk.withContext(function($ctx2) { inspectedContext=_st(inspectedContext)._outerContext(); inspectedContext; $3=inspectedContext; if(($receiver = $3) == null || $receiver.isNil){ return $3; } else { return _st(variables)._addAll_(_st(inspectedContext)._locals()); }; }, function($ctx2) {$ctx2.fillBlock({},$ctx1,3)})})); _st(anInspector)._setLabel_("Context"); $4=_st(anInspector)._setVariables_(variables); return self}, function($ctx1) {$ctx1.fill(self,"inspectOn:",{anInspector:anInspector,variables:variables,inspectedContext:inspectedContext},globals.HLContextInspectorDecorator)})}, args: ["anInspector"], source: "inspectOn: anInspector\x0a\x09| variables inspectedContext |\x0a\x09\x0a\x09variables := Dictionary new.\x0a\x09inspectedContext := self context.\x0a\x09\x0a\x09variables addAll: inspectedContext locals.\x0a\x09\x0a\x09[ inspectedContext notNil and: [ inspectedContext isBlockContext ] ] whileTrue: [\x0a\x09\x09inspectedContext := inspectedContext outerContext.\x0a\x09\x09inspectedContext ifNotNil: [\x0a\x09\x09\x09variables addAll: inspectedContext locals ] ].\x0a\x09\x0a\x09anInspector\x0a\x09\x09setLabel: 'Context';\x0a\x09\x09setVariables: variables", messageSends: ["new", "context", "addAll:", "locals", "whileTrue:", "and:", "notNil", "isBlockContext", 
"outerContext", "ifNotNil:", "setLabel:", "setVariables:"], referencedClasses: ["Dictionary"] }), globals.HLContextInspectorDecorator);<|fim▁hole|>smalltalk.addMethod( smalltalk.method({ selector: "on:", protocol: 'instance creation', fn: function (aContext){ var self=this; return smalltalk.withContext(function($ctx1) { var $2,$3,$1; $2=self._new(); _st($2)._initializeFromContext_(aContext); $3=_st($2)._yourself(); $1=$3; return $1; }, function($ctx1) {$ctx1.fill(self,"on:",{aContext:aContext},globals.HLContextInspectorDecorator.klass)})}, args: ["aContext"], source: "on: aContext\x0a\x09^ self new\x0a\x09\x09initializeFromContext: aContext;\x0a\x09\x09yourself", messageSends: ["initializeFromContext:", "new", "yourself"], referencedClasses: [] }), globals.HLContextInspectorDecorator.klass); smalltalk.addClass('HLDebugger', globals.HLFocusableWidget, ['model', 'stackListWidget', 'codeWidget', 'inspectorWidget'], 'Helios-Debugger'); globals.HLDebugger.comment="I am the main widget for the Helios debugger."; smalltalk.addMethod( smalltalk.method({ selector: "codeWidget", protocol: 'widgets', fn: function (){ var self=this; function $HLDebuggerCodeWidget(){return globals.HLDebuggerCodeWidget||(typeof HLDebuggerCodeWidget=="undefined"?nil:HLDebuggerCodeWidget)} function $HLDebuggerCodeModel(){return globals.HLDebuggerCodeModel||(typeof HLDebuggerCodeModel=="undefined"?nil:HLDebuggerCodeModel)} return smalltalk.withContext(function($ctx1) { var $2,$3,$4,$6,$7,$8,$9,$5,$10,$1,$receiver; $2=self["@codeWidget"]; if(($receiver = $2) == null || $receiver.isNil){ $3=_st($HLDebuggerCodeWidget())._new(); $ctx1.sendIdx["new"]=1; $4=$3; $6=_st($HLDebuggerCodeModel())._new(); $7=$6; $8=self._model(); $ctx1.sendIdx["model"]=1; _st($7)._debuggerModel_($8); $9=_st($6)._yourself(); $ctx1.sendIdx["yourself"]=1; $5=$9; _st($4)._model_($5); _st($3)._browserModel_(self._model()); $10=_st($3)._yourself(); self["@codeWidget"]=$10; $1=self["@codeWidget"]; } else { $1=$2; }; return $1; }, 
function($ctx1) {$ctx1.fill(self,"codeWidget",{},globals.HLDebugger)})}, args: [], source: "codeWidget\x0a\x09^ codeWidget ifNil: [ codeWidget := HLDebuggerCodeWidget new\x0a\x09\x09model: (HLDebuggerCodeModel new\x0a\x09\x09\x09debuggerModel: self model;\x0a\x09\x09\x09yourself);\x0a\x09\x09browserModel: self model;\x0a\x09\x09yourself ]", messageSends: ["ifNil:", "model:", "new", "debuggerModel:", "model", "yourself", "browserModel:"], referencedClasses: ["HLDebuggerCodeWidget", "HLDebuggerCodeModel"] }), globals.HLDebugger); smalltalk.addMethod( smalltalk.method({ selector: "cssClass", protocol: 'accessing', fn: function (){ var self=this; return smalltalk.withContext(function($ctx1) { var $2,$1; $2=($ctx1.supercall = true, globals.HLDebugger.superclass.fn.prototype._cssClass.apply(_st(self), [])); $ctx1.supercall = false; $1=_st($2).__comma(" hl_debugger"); return $1; }, function($ctx1) {$ctx1.fill(self,"cssClass",{},globals.HLDebugger)})}, args: [], source: "cssClass\x0a\x09^ super cssClass, ' hl_debugger'", messageSends: [",", "cssClass"], referencedClasses: [] }), globals.HLDebugger); smalltalk.addMethod( smalltalk.method({ selector: "focus", protocol: 'actions', fn: function (){ var self=this; return smalltalk.withContext(function($ctx1) { _st(self._stackListWidget())._focus(); return self}, function($ctx1) {$ctx1.fill(self,"focus",{},globals.HLDebugger)})}, args: [], source: "focus\x0a\x09self stackListWidget focus", messageSends: ["focus", "stackListWidget"], referencedClasses: [] }), globals.HLDebugger); smalltalk.addMethod( smalltalk.method({ selector: "initializeFromError:", protocol: 'initialization', fn: function (anError){ var self=this; function $HLDebuggerModel(){return globals.HLDebuggerModel||(typeof HLDebuggerModel=="undefined"?nil:HLDebuggerModel)} return smalltalk.withContext(function($ctx1) { self["@model"]=_st($HLDebuggerModel())._on_(anError); self._observeModel(); return self}, function($ctx1) 
{$ctx1.fill(self,"initializeFromError:",{anError:anError},globals.HLDebugger)})}, args: ["anError"], source: "initializeFromError: anError\x0a\x09model := HLDebuggerModel on: anError.\x0a\x09self observeModel", messageSends: ["on:", "observeModel"], referencedClasses: ["HLDebuggerModel"] }), globals.HLDebugger); smalltalk.addMethod( smalltalk.method({ selector: "inspectorWidget", protocol: 'widgets', fn: function (){ var self=this; function $HLInspectorWidget(){return globals.HLInspectorWidget||(typeof HLInspectorWidget=="undefined"?nil:HLInspectorWidget)} return smalltalk.withContext(function($ctx1) { var $2,$1,$receiver; $2=self["@inspectorWidget"]; if(($receiver = $2) == null || $receiver.isNil){ self["@inspectorWidget"]=_st($HLInspectorWidget())._new(); $1=self["@inspectorWidget"]; } else { $1=$2; }; return $1; }, function($ctx1) {$ctx1.fill(self,"inspectorWidget",{},globals.HLDebugger)})}, args: [], source: "inspectorWidget\x0a\x09^ inspectorWidget ifNil: [ \x0a\x09\x09inspectorWidget := HLInspectorWidget new ]", messageSends: ["ifNil:", "new"], referencedClasses: ["HLInspectorWidget"] }), globals.HLDebugger); smalltalk.addMethod( smalltalk.method({ selector: "model", protocol: 'accessing', fn: function (){ var self=this; function $HLDebuggerModel(){return globals.HLDebuggerModel||(typeof HLDebuggerModel=="undefined"?nil:HLDebuggerModel)} return smalltalk.withContext(function($ctx1) { var $2,$1,$receiver; $2=self["@model"]; if(($receiver = $2) == null || $receiver.isNil){ self["@model"]=_st($HLDebuggerModel())._new(); $1=self["@model"]; } else { $1=$2; }; return $1; }, function($ctx1) {$ctx1.fill(self,"model",{},globals.HLDebugger)})}, args: [], source: "model\x0a\x09^ model ifNil: [ model := HLDebuggerModel new ]", messageSends: ["ifNil:", "new"], referencedClasses: ["HLDebuggerModel"] }), globals.HLDebugger); smalltalk.addMethod( smalltalk.method({ selector: "observeModel", protocol: 'actions', fn: function (){ var self=this; function 
$HLDebuggerContextSelected(){return globals.HLDebuggerContextSelected||(typeof HLDebuggerContextSelected=="undefined"?nil:HLDebuggerContextSelected)} function $HLDebuggerStepped(){return globals.HLDebuggerStepped||(typeof HLDebuggerStepped=="undefined"?nil:HLDebuggerStepped)} function $HLDebuggerProceeded(){return globals.HLDebuggerProceeded||(typeof HLDebuggerProceeded=="undefined"?nil:HLDebuggerProceeded)} return smalltalk.withContext(function($ctx1) { var $1,$2; $1=_st(self._model())._announcer(); _st($1)._on_send_to_($HLDebuggerContextSelected(),"onContextSelected:",self); $ctx1.sendIdx["on:send:to:"]=1; _st($1)._on_send_to_($HLDebuggerStepped(),"onDebuggerStepped:",self); $ctx1.sendIdx["on:send:to:"]=2; $2=_st($1)._on_send_to_($HLDebuggerProceeded(),"onDebuggerProceeded",self); return self}, function($ctx1) {$ctx1.fill(self,"observeModel",{},globals.HLDebugger)})}, args: [], source: "observeModel\x0a\x09self model announcer \x0a\x09\x09on: HLDebuggerContextSelected\x0a\x09\x09send: #onContextSelected:\x0a\x09\x09to: self;\x0a\x09\x09\x0a\x09\x09on: HLDebuggerStepped\x0a\x09\x09send: #onDebuggerStepped:\x0a\x09\x09to: self;\x0a\x09\x09\x0a\x09\x09on: HLDebuggerProceeded\x0a\x09\x09send: #onDebuggerProceeded\x0a\x09\x09to: self", messageSends: ["on:send:to:", "announcer", "model"], referencedClasses: ["HLDebuggerContextSelected", "HLDebuggerStepped", "HLDebuggerProceeded"] }), globals.HLDebugger); smalltalk.addMethod( smalltalk.method({ selector: "onContextSelected:", protocol: 'reactions', fn: function (anAnnouncement){ var self=this; function $HLContextInspectorDecorator(){return globals.HLContextInspectorDecorator||(typeof HLContextInspectorDecorator=="undefined"?nil:HLContextInspectorDecorator)} return smalltalk.withContext(function($ctx1) { _st(self._inspectorWidget())._inspect_(_st($HLContextInspectorDecorator())._on_(_st(anAnnouncement)._context())); return self}, function($ctx1) 
{$ctx1.fill(self,"onContextSelected:",{anAnnouncement:anAnnouncement},globals.HLDebugger)})}, args: ["anAnnouncement"], source: "onContextSelected: anAnnouncement\x0a\x09self inspectorWidget inspect: (HLContextInspectorDecorator on: anAnnouncement context)", messageSends: ["inspect:", "inspectorWidget", "on:", "context"], referencedClasses: ["HLContextInspectorDecorator"] }), globals.HLDebugger); smalltalk.addMethod( smalltalk.method({ selector: "onDebuggerProceeded", protocol: 'reactions', fn: function (){ var self=this; return smalltalk.withContext(function($ctx1) { self._removeTab(); return self}, function($ctx1) {$ctx1.fill(self,"onDebuggerProceeded",{},globals.HLDebugger)})}, args: [], source: "onDebuggerProceeded\x0a\x09self removeTab", messageSends: ["removeTab"], referencedClasses: [] }), globals.HLDebugger); smalltalk.addMethod( smalltalk.method({ selector: "onDebuggerStepped:", protocol: 'reactions', fn: function (anAnnouncement){ var self=this; function $HLContextInspectorDecorator(){return globals.HLContextInspectorDecorator||(typeof HLContextInspectorDecorator=="undefined"?nil:HLContextInspectorDecorator)} return smalltalk.withContext(function($ctx1) { var $1; $1=_st(self._model())._atEnd(); if(smalltalk.assert($1)){ self._removeTab(); }; _st(self._inspectorWidget())._inspect_(_st($HLContextInspectorDecorator())._on_(_st(anAnnouncement)._context())); _st(self._stackListWidget())._refresh(); return self}, function($ctx1) {$ctx1.fill(self,"onDebuggerStepped:",{anAnnouncement:anAnnouncement},globals.HLDebugger)})}, args: ["anAnnouncement"], source: "onDebuggerStepped: anAnnouncement\x0a\x09self model atEnd ifTrue: [ self removeTab ].\x0a\x09\x0a\x09self inspectorWidget inspect: (HLContextInspectorDecorator on: anAnnouncement context).\x0a\x09self stackListWidget refresh", messageSends: ["ifTrue:", "atEnd", "model", "removeTab", "inspect:", "inspectorWidget", "on:", "context", "refresh", "stackListWidget"], referencedClasses: 
["HLContextInspectorDecorator"] }), globals.HLDebugger); smalltalk.addMethod( smalltalk.method({ selector: "registerBindingsOn:", protocol: 'keybindings', fn: function (aBindingGroup){ var self=this; function $HLToolCommand(){return globals.HLToolCommand||(typeof HLToolCommand=="undefined"?nil:HLToolCommand)} return smalltalk.withContext(function($ctx1) { _st($HLToolCommand())._registerConcreteClassesOn_for_(aBindingGroup,self._model()); return self}, function($ctx1) {$ctx1.fill(self,"registerBindingsOn:",{aBindingGroup:aBindingGroup},globals.HLDebugger)})}, args: ["aBindingGroup"], source: "registerBindingsOn: aBindingGroup\x0a\x09HLToolCommand \x0a\x09\x09registerConcreteClassesOn: aBindingGroup \x0a\x09\x09for: self model", messageSends: ["registerConcreteClassesOn:for:", "model"], referencedClasses: ["HLToolCommand"] }), globals.HLDebugger); smalltalk.addMethod( smalltalk.method({ selector: "renderContentOn:", protocol: 'rendering', fn: function (html){ var self=this; function $HLContainer(){return globals.HLContainer||(typeof HLContainer=="undefined"?nil:HLContainer)} function $HLVerticalSplitter(){return globals.HLVerticalSplitter||(typeof HLVerticalSplitter=="undefined"?nil:HLVerticalSplitter)} function $HLHorizontalSplitter(){return globals.HLHorizontalSplitter||(typeof HLHorizontalSplitter=="undefined"?nil:HLHorizontalSplitter)} return smalltalk.withContext(function($ctx1) { var $2,$1; self._renderHeadOn_(html); $2=_st($HLVerticalSplitter())._with_with_(self._codeWidget(),_st($HLHorizontalSplitter())._with_with_(self._stackListWidget(),self._inspectorWidget())); $ctx1.sendIdx["with:with:"]=1; $1=_st($HLContainer())._with_($2); _st(html)._with_($1); $ctx1.sendIdx["with:"]=1; return self}, function($ctx1) {$ctx1.fill(self,"renderContentOn:",{html:html},globals.HLDebugger)})}, args: ["html"], source: "renderContentOn: html\x0a\x09self renderHeadOn: html.\x0a\x09html with: (HLContainer with: (HLVerticalSplitter\x0a\x09\x09with: self codeWidget\x0a\x09\x09with: 
(HLHorizontalSplitter\x0a\x09\x09\x09with: self stackListWidget\x0a\x09\x09\x09with: self inspectorWidget)))", messageSends: ["renderHeadOn:", "with:", "with:with:", "codeWidget", "stackListWidget", "inspectorWidget"], referencedClasses: ["HLContainer", "HLVerticalSplitter", "HLHorizontalSplitter"] }), globals.HLDebugger); smalltalk.addMethod( smalltalk.method({ selector: "renderHeadOn:", protocol: 'rendering', fn: function (html){ var self=this; return smalltalk.withContext(function($ctx1) { var $1,$2; $1=_st(html)._div(); _st($1)._class_("head"); $2=_st($1)._with_((function(){ return smalltalk.withContext(function($ctx2) { return _st(_st(html)._h2())._with_(_st(_st(self._model())._error())._messageText()); }, function($ctx2) {$ctx2.fillBlock({},$ctx1,1)})})); $ctx1.sendIdx["with:"]=1; return self}, function($ctx1) {$ctx1.fill(self,"renderHeadOn:",{html:html},globals.HLDebugger)})}, args: ["html"], source: "renderHeadOn: html\x0a\x09html div \x0a\x09\x09class: 'head'; \x0a\x09\x09with: [ html h2 with: self model error messageText ]", messageSends: ["class:", "div", "with:", "h2", "messageText", "error", "model"], referencedClasses: [] }), globals.HLDebugger); smalltalk.addMethod( smalltalk.method({ selector: "stackListWidget", protocol: 'widgets', fn: function (){ var self=this; function $HLStackListWidget(){return globals.HLStackListWidget||(typeof HLStackListWidget=="undefined"?nil:HLStackListWidget)} return smalltalk.withContext(function($ctx1) { var $2,$3,$4,$1,$receiver; $2=self["@stackListWidget"]; if(($receiver = $2) == null || $receiver.isNil){ $3=_st($HLStackListWidget())._on_(self._model()); _st($3)._next_(self._codeWidget()); $4=_st($3)._yourself(); self["@stackListWidget"]=$4; $1=self["@stackListWidget"]; } else { $1=$2; }; return $1; }, function($ctx1) {$ctx1.fill(self,"stackListWidget",{},globals.HLDebugger)})}, args: [], source: "stackListWidget\x0a\x09^ stackListWidget ifNil: [ \x0a\x09\x09stackListWidget := (HLStackListWidget on: self 
model)\x0a\x09\x09\x09next: self codeWidget;\x0a\x09\x09\x09yourself ]", messageSends: ["ifNil:", "next:", "on:", "model", "codeWidget", "yourself"], referencedClasses: ["HLStackListWidget"] }), globals.HLDebugger); smalltalk.addMethod( smalltalk.method({ selector: "unregister", protocol: 'actions', fn: function (){ var self=this; return smalltalk.withContext(function($ctx1) { ($ctx1.supercall = true, globals.HLDebugger.superclass.fn.prototype._unregister.apply(_st(self), [])); $ctx1.supercall = false; $ctx1.sendIdx["unregister"]=1; _st(self._inspectorWidget())._unregister(); return self}, function($ctx1) {$ctx1.fill(self,"unregister",{},globals.HLDebugger)})}, args: [], source: "unregister\x0a\x09super unregister.\x0a\x09self inspectorWidget unregister", messageSends: ["unregister", "inspectorWidget"], referencedClasses: [] }), globals.HLDebugger); smalltalk.addMethod( smalltalk.method({ selector: "on:", protocol: 'instance creation', fn: function (anError){ var self=this; return smalltalk.withContext(function($ctx1) { var $2,$3,$1; $2=self._new(); _st($2)._initializeFromError_(anError); $3=_st($2)._yourself(); $1=$3; return $1; }, function($ctx1) {$ctx1.fill(self,"on:",{anError:anError},globals.HLDebugger.klass)})}, args: ["anError"], source: "on: anError\x0a\x09^ self new\x0a\x09\x09initializeFromError: anError;\x0a\x09\x09yourself", messageSends: ["initializeFromError:", "new", "yourself"], referencedClasses: [] }), globals.HLDebugger.klass); smalltalk.addMethod( smalltalk.method({ selector: "tabClass", protocol: 'accessing', fn: function (){ var self=this; return "debugger"; }, args: [], source: "tabClass\x0a\x09^ 'debugger'", messageSends: [], referencedClasses: [] }), globals.HLDebugger.klass); smalltalk.addMethod( smalltalk.method({ selector: "tabLabel", protocol: 'accessing', fn: function (){ var self=this; return "Debugger"; }, args: [], source: "tabLabel\x0a\x09^ 'Debugger'", messageSends: [], referencedClasses: [] }), globals.HLDebugger.klass); 
// NOTE(review): generated Amber Smalltalk code (Helios-Debugger package). The fn bodies,
// $ctx bookkeeping and the embedded Smalltalk `source:` strings are compiler output and
// must be kept byte-for-byte in sync; regenerate from the Smalltalk source, do not hand-edit.
// ---- HLDebuggerCodeModel: code model whose doIt: evaluates in the debugged context ----
smalltalk.addClass('HLDebuggerCodeModel', globals.HLCodeModel, ['debuggerModel'], 'Helios-Debugger');
// debuggerModel / debuggerModel: -- plain ivar accessor pair.
smalltalk.addMethod( smalltalk.method({ selector: "debuggerModel", protocol: 'accessing', fn: function (){ var self=this; var $1; $1=self["@debuggerModel"]; return $1; }, args: [], source: "debuggerModel\x0a\x09^ debuggerModel", messageSends: [], referencedClasses: [] }), globals.HLDebuggerCodeModel);
smalltalk.addMethod( smalltalk.method({ selector: "debuggerModel:", protocol: 'accessing', fn: function (anObject){ var self=this; self["@debuggerModel"]=anObject; return self}, args: ["anObject"], source: "debuggerModel: anObject\x0a\x09debuggerModel := anObject", messageSends: [], referencedClasses: [] }), globals.HLDebuggerCodeModel);
// doIt: -- evaluates aString via the debugger model; on error delegates to ErrorHandler and answers nil.
smalltalk.addMethod( smalltalk.method({ selector: "doIt:", protocol: 'actions', fn: function (aString){ var self=this; function $ErrorHandler(){return globals.ErrorHandler||(typeof ErrorHandler=="undefined"?nil:ErrorHandler)} return smalltalk.withContext(function($ctx1) { var $1; $1=_st((function(){ return smalltalk.withContext(function($ctx2) { return _st(self._debuggerModel())._evaluate_(aString); }, function($ctx2) {$ctx2.fillBlock({},$ctx1,1)})}))._tryCatch_((function(e){ return smalltalk.withContext(function($ctx2) { _st($ErrorHandler())._handleError_(e); return nil; }, function($ctx2) {$ctx2.fillBlock({e:e},$ctx1,2)})})); return $1; }, function($ctx1) {$ctx1.fill(self,"doIt:",{aString:aString},globals.HLDebuggerCodeModel)})}, args: ["aString"], source: "doIt: aString\x0a\x09^ [ self debuggerModel evaluate: aString ]\x0a\x09\x09tryCatch: [ :e | \x0a\x09\x09\x09ErrorHandler handleError: e.\x0a\x09\x09\x09nil ]", messageSends: ["tryCatch:", "evaluate:", "debuggerModel", "handleError:"], referencedClasses: ["ErrorHandler"] }), globals.HLDebuggerCodeModel);
// ---- HLDebuggerCodeWidget: code editor with a 'stops' gutter highlighting the current AST node ----
smalltalk.addClass('HLDebuggerCodeWidget', globals.HLBrowserCodeWidget, [], 'Helios-Debugger');
// addStopAt: -- places a stop marker DOM element in the CodeMirror 'stops' gutter at the given line.
smalltalk.addMethod( smalltalk.method({ selector: "addStopAt:", protocol: 'actions', fn: function (anInteger){ var self=this; return smalltalk.withContext(function($ctx1) { _st(self["@editor"])._setGutterMarker_gutter_value_(anInteger,"stops",_st(_st("<div class=\x22stop\x22></stop>"._asJQuery())._toArray())._first()); return self}, function($ctx1) {$ctx1.fill(self,"addStopAt:",{anInteger:anInteger},globals.HLDebuggerCodeWidget)})}, args: ["anInteger"], source: "addStopAt: anInteger\x0a\x09editor\x0a\x09\x09setGutterMarker: anInteger\x0a\x09\x09gutter: 'stops'\x0a\x09\x09value: '<div class=\x22stop\x22></stop>' asJQuery toArray first", messageSends: ["setGutterMarker:gutter:value:", "first", "toArray", "asJQuery"], referencedClasses: [] }), globals.HLDebuggerCodeWidget);
// clearHighlight -- clears the whole 'stops' gutter.
smalltalk.addMethod( smalltalk.method({ selector: "clearHighlight", protocol: 'actions', fn: function (){ var self=this; return smalltalk.withContext(function($ctx1) { _st(self._editor())._clearGutter_("stops"); return self}, function($ctx1) {$ctx1.fill(self,"clearHighlight",{},globals.HLDebuggerCodeWidget)})}, args: [], source: "clearHighlight\x0a\x09self editor clearGutter: 'stops'", messageSends: ["clearGutter:", "editor"], referencedClasses: [] }), globals.HLDebuggerCodeWidget);
// contents: -- clears the highlight before delegating to super contents:.
smalltalk.addMethod( smalltalk.method({ selector: "contents:", protocol: 'accessing', fn: function (aString){ var self=this; return smalltalk.withContext(function($ctx1) { self._clearHighlight(); ($ctx1.supercall = true, globals.HLDebuggerCodeWidget.superclass.fn.prototype._contents_.apply(_st(self), [aString])); $ctx1.supercall = false; return self}, function($ctx1) {$ctx1.fill(self,"contents:",{aString:aString},globals.HLDebuggerCodeWidget)})}, args: ["aString"], source: "contents: aString\x0a\x09self clearHighlight.\x0a\x09super contents: aString", messageSends: ["clearHighlight", "contents:"], referencedClasses: [] }), globals.HLDebuggerCodeWidget);
// editorOptions -- adds the extra 'stops' gutter to the inherited CodeMirror options.
smalltalk.addMethod( smalltalk.method({ selector: "editorOptions", protocol: 'accessing', fn: function (){ var self=this; return smalltalk.withContext(function($ctx1) { var $2,$3,$1; $2=($ctx1.supercall = true, globals.HLDebuggerCodeWidget.superclass.fn.prototype._editorOptions.apply(_st(self), [])); $ctx1.supercall = false; _st($2)._at_put_("gutters",["CodeMirror-linenumbers", "stops"]); $3=_st($2)._yourself(); $1=$3; return $1; }, function($ctx1) {$ctx1.fill(self,"editorOptions",{},globals.HLDebuggerCodeWidget)})}, args: [], source: "editorOptions\x0a\x09^ super editorOptions\x0a\x09\x09at: 'gutters' put: #('CodeMirror-linenumbers' 'stops');\x0a\x09\x09yourself", messageSends: ["at:put:", "editorOptions", "yourself"], referencedClasses: [] }), globals.HLDebuggerCodeWidget);
// highlight -- highlights the browser model's next AST node, if any.
smalltalk.addMethod( smalltalk.method({ selector: "highlight", protocol: 'actions', fn: function (){ var self=this; return smalltalk.withContext(function($ctx1) { var $1,$receiver; $1=_st(self._browserModel())._nextNode(); if(($receiver = $1) == null || $receiver.isNil){ $1; } else { var node; node=$receiver; self._highlightNode_(node); }; return self}, function($ctx1) {$ctx1.fill(self,"highlight",{},globals.HLDebuggerCodeWidget)})}, args: [], source: "highlight\x0a\x09self browserModel nextNode ifNotNil: [ :node |\x0a\x09\x09self highlightNode: node ]", messageSends: ["ifNotNil:", "nextNode", "browserModel", "highlightNode:"], referencedClasses: [] }), globals.HLDebuggerCodeWidget);
// highlightNode: -- adds a stop marker at the node's start line and selects the node's
// source range in the editor (positions are 1-based, CodeMirror is 0-based, hence the -1s).
smalltalk.addMethod( smalltalk.method({ selector: "highlightNode:", protocol: 'actions', fn: function (aNode){ var self=this; var token; return smalltalk.withContext(function($ctx1) { var $4,$3,$2,$1,$5,$9,$8,$7,$11,$10,$6,$15,$14,$13,$12,$receiver; if(($receiver = aNode) == null || $receiver.isNil){ aNode; } else { self._clearHighlight(); $4=_st(aNode)._positionStart(); $ctx1.sendIdx["positionStart"]=1; $3=_st($4)._x(); $ctx1.sendIdx["x"]=1; $2=_st($3).__minus((1)); $ctx1.sendIdx["-"]=1; $1=self._addStopAt_($2); $1; $5=self._editor(); $9=_st(aNode)._positionStart(); $ctx1.sendIdx["positionStart"]=2; $8=_st($9)._x(); $ctx1.sendIdx["x"]=2; $7=_st($8).__minus((1)); $ctx1.sendIdx["-"]=2; $11=_st(_st(aNode)._positionStart())._y(); $ctx1.sendIdx["y"]=1; $10=_st($11).__minus((1)); $ctx1.sendIdx["-"]=3; $6=globals.HashedCollection._newFromPairs_(["line",$7,"ch",$10]); $15=_st(aNode)._positionEnd(); $ctx1.sendIdx["positionEnd"]=1; $14=_st($15)._x(); $13=_st($14).__minus((1)); $12=globals.HashedCollection._newFromPairs_(["line",$13,"ch",_st(_st(aNode)._positionEnd())._y()]); _st($5)._setSelection_to_($6,$12); }; return self}, function($ctx1) {$ctx1.fill(self,"highlightNode:",{aNode:aNode,token:token},globals.HLDebuggerCodeWidget)})}, args: ["aNode"], source: "highlightNode: aNode\x0a\x09| token |\x0a\x09\x0a\x09aNode ifNotNil: [\x0a\x09\x09self\x0a\x09\x09\x09clearHighlight;\x0a\x09\x09\x09addStopAt: aNode positionStart x - 1.\x0a\x0a\x09\x09self editor \x0a\x09\x09\x09setSelection: #{ 'line' -> (aNode positionStart x - 1). 'ch' -> (aNode positionStart y - 1) }\x0a\x09\x09\x09to: #{ 'line' -> (aNode positionEnd x - 1). 'ch' -> (aNode positionEnd y) } ]", messageSends: ["ifNotNil:", "clearHighlight", "addStopAt:", "-", "x", "positionStart", "setSelection:to:", "editor", "y", "positionEnd"], referencedClasses: [] }), globals.HLDebuggerCodeWidget);
// observeBrowserModel -- subscribes onContextSelected to context-selection, step and 'where' announcements.
smalltalk.addMethod( smalltalk.method({ selector: "observeBrowserModel", protocol: 'actions', fn: function (){ var self=this; function $HLDebuggerContextSelected(){return globals.HLDebuggerContextSelected||(typeof HLDebuggerContextSelected=="undefined"?nil:HLDebuggerContextSelected)} function $HLDebuggerStepped(){return globals.HLDebuggerStepped||(typeof HLDebuggerStepped=="undefined"?nil:HLDebuggerStepped)} function $HLDebuggerWhere(){return globals.HLDebuggerWhere||(typeof HLDebuggerWhere=="undefined"?nil:HLDebuggerWhere)} return smalltalk.withContext(function($ctx1) { var $2,$1,$4,$3; ($ctx1.supercall = true, globals.HLDebuggerCodeWidget.superclass.fn.prototype._observeBrowserModel.apply(_st(self), [])); $ctx1.supercall = false; $2=self._browserModel(); $ctx1.sendIdx["browserModel"]=1; $1=_st($2)._announcer(); $ctx1.sendIdx["announcer"]=1; _st($1)._on_send_to_($HLDebuggerContextSelected(),"onContextSelected",self); $ctx1.sendIdx["on:send:to:"]=1; $4=self._browserModel(); $ctx1.sendIdx["browserModel"]=2; $3=_st($4)._announcer(); $ctx1.sendIdx["announcer"]=2; _st($3)._on_send_to_($HLDebuggerStepped(),"onContextSelected",self); $ctx1.sendIdx["on:send:to:"]=2; _st(_st(self._browserModel())._announcer())._on_send_to_($HLDebuggerWhere(),"onContextSelected",self); return self}, function($ctx1) {$ctx1.fill(self,"observeBrowserModel",{},globals.HLDebuggerCodeWidget)})}, args: [], source: "observeBrowserModel\x0a\x09super observeBrowserModel.\x0a\x09\x0a\x09self browserModel announcer \x0a\x09\x09on: HLDebuggerContextSelected\x0a\x09\x09send: #onContextSelected\x0a\x09\x09to: self.\x0a\x09\x0a\x09self browserModel announcer \x0a\x09\x09on: HLDebuggerStepped\x0a\x09\x09send: #onContextSelected\x0a\x09\x09to: self.\x0a\x09\x0a\x09self browserModel announcer \x0a\x09\x09on: HLDebuggerWhere\x0a\x09\x09send: #onContextSelected\x0a\x09\x09to: self", messageSends: ["observeBrowserModel", "on:send:to:", "announcer", "browserModel"], referencedClasses: ["HLDebuggerContextSelected", "HLDebuggerStepped", "HLDebuggerWhere"] }), globals.HLDebuggerCodeWidget);
// onContextSelected -- reaction: re-highlight the current node.
smalltalk.addMethod( smalltalk.method({ selector: "onContextSelected", protocol: 'reactions', fn: function (){ var self=this; return smalltalk.withContext(function($ctx1) { self._highlight(); return self}, function($ctx1) {$ctx1.fill(self,"onContextSelected",{},globals.HLDebuggerCodeWidget)})}, args: [], source: "onContextSelected\x0a\x09self highlight", messageSends: ["highlight"], referencedClasses: [] }), globals.HLDebuggerCodeWidget);
// renderOn: -- after super rendering, loads the selected method's source into the editor.
smalltalk.addMethod( smalltalk.method({ selector: "renderOn:", protocol: 'rendering', fn: function (html){ var self=this; return smalltalk.withContext(function($ctx1) { ($ctx1.supercall = true, globals.HLDebuggerCodeWidget.superclass.fn.prototype._renderOn_.apply(_st(self), [html])); $ctx1.supercall = false; self._contents_(_st(_st(self._browserModel())._selectedMethod())._source()); return self}, function($ctx1) {$ctx1.fill(self,"renderOn:",{html:html},globals.HLDebuggerCodeWidget)})}, args: ["html"], source: "renderOn: html\x0a\x09super renderOn: html.\x0a\x09self contents: self browserModel selectedMethod source", messageSends: ["renderOn:", "contents:", "source", "selectedMethod", "browserModel"], referencedClasses: [] }), globals.HLDebuggerCodeWidget);
// ---- HLDebuggerModel: tool model wrapping an ASTDebugger and the context stack ----
smalltalk.addClass('HLDebuggerModel', globals.HLToolModel, ['rootContext', 'debugger', 'error'], 'Helios-Debugger');
globals.HLDebuggerModel.comment="I am a model for debugging Amber code in Helios.\x0a\x0aMy instances hold a reference to an `ASTDebugger` instance, itself referencing the current `context`. The context should be the root of the context stack.";
// atEnd -- delegates to the ASTDebugger.
smalltalk.addMethod( smalltalk.method({ selector: "atEnd", protocol: 'testing', fn: function (){ var self=this; return smalltalk.withContext(function($ctx1) { var $1; $1=_st(self._debugger())._atEnd(); return $1; }, function($ctx1) {$ctx1.fill(self,"atEnd",{},globals.HLDebuggerModel)})}, args: [], source: "atEnd\x0a\x09^ self debugger atEnd", messageSends: ["atEnd", "debugger"], referencedClasses: [] }), globals.HLDebuggerModel);
// contexts -- walks the outerContext chain from rootContext into an OrderedCollection.
smalltalk.addMethod( smalltalk.method({ selector: "contexts", protocol: 'accessing', fn: function (){ var self=this; var contexts,context; function $OrderedCollection(){return globals.OrderedCollection||(typeof OrderedCollection=="undefined"?nil:OrderedCollection)} return smalltalk.withContext(function($ctx1) { var $1; contexts=_st($OrderedCollection())._new(); context=self._rootContext(); _st((function(){ return smalltalk.withContext(function($ctx2) { return _st(context)._notNil(); }, function($ctx2) {$ctx2.fillBlock({},$ctx1,1)})}))._whileTrue_((function(){ return smalltalk.withContext(function($ctx2) { _st(contexts)._add_(context); context=_st(context)._outerContext(); return context; }, function($ctx2) {$ctx2.fillBlock({},$ctx1,2)})})); $1=contexts; return $1; }, function($ctx1) {$ctx1.fill(self,"contexts",{contexts:contexts,context:context},globals.HLDebuggerModel)})}, args: [], source: "contexts\x0a\x09| contexts context |\x0a\x09\x0a\x09contexts := OrderedCollection new.\x0a\x09context := self rootContext.\x0a\x09\x0a\x09[ context notNil ] whileTrue: [\x0a\x09\x09contexts add: context.\x0a\x09\x09context := context outerContext ].\x0a\x09\x09\x0a\x09^ contexts", messageSends: ["new", "rootContext", "whileTrue:", "notNil", "add:", "outerContext"], referencedClasses: ["OrderedCollection"] }), globals.HLDebuggerModel);
// currentContext -- the ASTDebugger's current context.
smalltalk.addMethod( smalltalk.method({ selector: "currentContext", protocol: 'accessing', fn: function (){ var self=this; return smalltalk.withContext(function($ctx1) { var $1; $1=_st(self._debugger())._context(); return $1; }, function($ctx1) {$ctx1.fill(self,"currentContext",{},globals.HLDebuggerModel)})}, args: [], source: "currentContext\x0a\x09^ self debugger context", messageSends: ["context", "debugger"], referencedClasses: [] }), globals.HLDebuggerModel);
// currentContext: -- selects the context's method, retargets the debugger and announces HLDebuggerContextSelected.
smalltalk.addMethod( smalltalk.method({ selector: "currentContext:", protocol: 'accessing', fn: function (aContext){ var self=this; function $HLDebuggerContextSelected(){return globals.HLDebuggerContextSelected||(typeof HLDebuggerContextSelected=="undefined"?nil:HLDebuggerContextSelected)} return smalltalk.withContext(function($ctx1) { var $1,$2; self._withChangesDo_((function(){ return smalltalk.withContext(function($ctx2) { self._selectedMethod_(_st(aContext)._method()); _st(self._debugger())._context_(aContext); $ctx2.sendIdx["context:"]=1; $1=_st($HLDebuggerContextSelected())._new(); _st($1)._context_(aContext); $2=_st($1)._yourself(); return _st(self._announcer())._announce_($2); }, function($ctx2) {$ctx2.fillBlock({},$ctx1,1)})})); return self}, function($ctx1) {$ctx1.fill(self,"currentContext:",{aContext:aContext},globals.HLDebuggerModel)})}, args: ["aContext"], source: "currentContext: aContext\x0a\x09self withChangesDo: [ \x0a\x09\x09self selectedMethod: aContext method.\x0a\x09\x09self debugger context: aContext.\x0a\x09\x09self announcer announce: (HLDebuggerContextSelected new\x0a\x09\x09\x09context: aContext;\x0a\x09\x09\x09yourself) ]", messageSends: ["withChangesDo:", "selectedMethod:", "method", "context:", "debugger", "announce:", "announcer", "new", "yourself"], referencedClasses: ["HLDebuggerContextSelected"] }), globals.HLDebuggerModel);
// debugger -- lazily instantiates the ASTDebugger.
smalltalk.addMethod( smalltalk.method({ selector: "debugger", protocol: 'accessing', fn: function (){ var self=this; function $ASTDebugger(){return globals.ASTDebugger||(typeof ASTDebugger=="undefined"?nil:ASTDebugger)} return smalltalk.withContext(function($ctx1) { var $2,$1,$receiver; $2=self["@debugger"]; if(($receiver = $2) == null || $receiver.isNil){ self["@debugger"]=_st($ASTDebugger())._new(); $1=self["@debugger"]; } else { $1=$2; }; return $1; }, function($ctx1) {$ctx1.fill(self,"debugger",{},globals.HLDebuggerModel)})}, args: [], source: "debugger\x0a\x09^ debugger ifNil: [ debugger := ASTDebugger new ]", messageSends: ["ifNil:", "new"], referencedClasses: ["ASTDebugger"] }), globals.HLDebuggerModel);
// error -- ivar getter for the debugged error.
smalltalk.addMethod( smalltalk.method({ selector: "error", protocol: 'accessing', fn: function (){ var self=this; var $1; $1=self["@error"]; return $1; }, args: [], source: "error\x0a\x09^ error", messageSends: [], referencedClasses: [] }), globals.HLDebuggerModel);
// evaluate: -- evaluates aString in the current debugged context via the environment.
smalltalk.addMethod( smalltalk.method({ selector: "evaluate:", protocol: 'evaluating', fn: function (aString){ var self=this; return smalltalk.withContext(function($ctx1) { var $1; $1=_st(self._environment())._evaluate_for_(aString,self._currentContext()); return $1; }, function($ctx1) {$ctx1.fill(self,"evaluate:",{aString:aString},globals.HLDebuggerModel)})}, args: ["aString"], source: "evaluate: aString\x0a\x09^ self environment \x0a\x09\x09evaluate: aString \x0a\x09\x09for: self currentContext", messageSends: ["evaluate:for:", "environment", "currentContext"], referencedClasses: [] }), globals.HLDebuggerModel);
// flushInnerContexts -- drops inner contexts after a step (they are stale) and re-roots the stack.
smalltalk.addMethod( smalltalk.method({ selector: "flushInnerContexts", protocol: 'private', fn: function (){ var self=this; return smalltalk.withContext(function($ctx1) { var $1; $1=self._currentContext(); $ctx1.sendIdx["currentContext"]=1; _st($1)._innerContext_(nil); self["@rootContext"]=self._currentContext(); self._initializeContexts(); return self}, function($ctx1) {$ctx1.fill(self,"flushInnerContexts",{},globals.HLDebuggerModel)})}, args: [], source: "flushInnerContexts\x0a\x09\x22When stepping, the inner contexts are not relevent anymore,\x0a\x09and can be flushed\x22\x0a\x09\x0a\x09self currentContext innerContext: nil.\x0a\x09rootContext := self currentContext.\x0a\x09self initializeContexts", messageSends: ["innerContext:", "currentContext", "initializeContexts"], referencedClasses: [] }), globals.HLDebuggerModel);
// initializeFromError: -- builds an AIContext from the error's context and selects the signaler's method.
smalltalk.addMethod( smalltalk.method({ selector: "initializeFromError:", protocol: 'initialization', fn: function (anError){ var self=this; var errorContext; function $AIContext(){return globals.AIContext||(typeof AIContext=="undefined"?nil:AIContext)} return smalltalk.withContext(function($ctx1) { self["@error"]=anError; errorContext=_st($AIContext())._fromMethodContext_(_st(self["@error"])._context()); self["@rootContext"]=_st(self["@error"])._signalerContextFrom_(errorContext); self._selectedMethod_(_st(self["@rootContext"])._method()); return self}, function($ctx1) {$ctx1.fill(self,"initializeFromError:",{anError:anError,errorContext:errorContext},globals.HLDebuggerModel)})}, args: ["anError"], source: "initializeFromError: anError\x0a\x09| errorContext |\x0a\x09\x0a\x09error := anError.\x0a\x09errorContext := (AIContext fromMethodContext: error context).\x0a\x09rootContext := error signalerContextFrom: errorContext.\x0a\x09self selectedMethod: rootContext method", messageSends: ["fromMethodContext:", "context", "signalerContextFrom:", "selectedMethod:", "method"], referencedClasses: ["AIContext"] }), globals.HLDebuggerModel);
// nextNode -- the AST node the debugger will execute next.
smalltalk.addMethod( smalltalk.method({ selector: "nextNode", protocol: 'accessing', fn: function (){ var self=this; return smalltalk.withContext(function($ctx1) { var $1; $1=_st(self._debugger())._node(); return $1; }, function($ctx1) {$ctx1.fill(self,"nextNode",{},globals.HLDebuggerModel)})}, args: [], source: "nextNode\x0a\x09^ self debugger node", messageSends: ["node", "debugger"], referencedClasses: [] }), globals.HLDebuggerModel);
// onStep -- re-roots the stack at the current context and forces a context/code refresh via announcement.
smalltalk.addMethod( smalltalk.method({ selector: "onStep", protocol: 'reactions', fn: function (){ var self=this; function $HLDebuggerContextSelected(){return globals.HLDebuggerContextSelected||(typeof HLDebuggerContextSelected=="undefined"?nil:HLDebuggerContextSelected)} return smalltalk.withContext(function($ctx1) { var $2,$1,$3,$4; self["@rootContext"]=self._currentContext(); $ctx1.sendIdx["currentContext"]=1; $2=self._currentContext(); $ctx1.sendIdx["currentContext"]=2; $1=_st($2)._method(); self._selectedMethod_($1); $3=_st($HLDebuggerContextSelected())._new(); _st($3)._context_(self._currentContext()); $4=_st($3)._yourself(); _st(self._announcer())._announce_($4); return self}, function($ctx1) {$ctx1.fill(self,"onStep",{},globals.HLDebuggerModel)})}, args: [], source: "onStep\x0a\x09rootContext := self currentContext.\x0a\x09\x0a\x09\x22Force a refresh of the context list and code widget\x22\x0a\x09self selectedMethod: self currentContext method.\x0a\x09self announcer announce: (HLDebuggerContextSelected new\x0a\x09\x09context: self currentContext;\x0a\x09\x09yourself)", messageSends: ["currentContext", "selectedMethod:", "method", "announce:", "announcer", "context:", "new", "yourself"], referencedClasses: ["HLDebuggerContextSelected"] }), globals.HLDebuggerModel);
// proceed -- resumes execution and announces HLDebuggerProceeded.
smalltalk.addMethod( smalltalk.method({ selector: "proceed", protocol: 'actions', fn: function (){ var self=this; function $HLDebuggerProceeded(){return globals.HLDebuggerProceeded||(typeof HLDebuggerProceeded=="undefined"?nil:HLDebuggerProceeded)} return smalltalk.withContext(function($ctx1) { _st(self._debugger())._proceed(); _st(self._announcer())._announce_(_st($HLDebuggerProceeded())._new()); return self}, function($ctx1) {$ctx1.fill(self,"proceed",{},globals.HLDebuggerModel)})}, args: [], source: "proceed\x0a\x09self debugger proceed.\x0a\x09\x0a\x09self announcer announce: HLDebuggerProceeded new", messageSends: ["proceed", "debugger", "announce:", "announcer", "new"], referencedClasses: ["HLDebuggerProceeded"] }), globals.HLDebuggerModel);
// restart -- restarts the debugger, refreshes (onStep), announces HLDebuggerStepped.
smalltalk.addMethod( smalltalk.method({ selector: "restart", protocol: 'actions', fn: function (){ var self=this; function $HLDebuggerStepped(){return globals.HLDebuggerStepped||(typeof HLDebuggerStepped=="undefined"?nil:HLDebuggerStepped)} return smalltalk.withContext(function($ctx1) { var $1,$2; _st(self._debugger())._restart(); self._onStep(); $1=_st($HLDebuggerStepped())._new(); _st($1)._context_(self._currentContext()); $2=_st($1)._yourself(); _st(self._announcer())._announce_($2); return self}, function($ctx1) {$ctx1.fill(self,"restart",{},globals.HLDebuggerModel)})}, args: [], source: "restart\x0a\x09self debugger restart.\x0a\x09self onStep.\x0a\x09\x0a\x09self announcer announce: (HLDebuggerStepped new\x0a\x09\x09context: self currentContext;\x0a\x09\x09yourself)", messageSends: ["restart", "debugger", "onStep", "announce:", "announcer", "context:", "new", "currentContext", "yourself"], referencedClasses: ["HLDebuggerStepped"] }), globals.HLDebuggerModel);
// rootContext -- ivar getter for the top of the context stack.
smalltalk.addMethod( smalltalk.method({ selector: "rootContext", protocol: 'accessing', fn: function (){ var self=this; var $1; $1=self["@rootContext"]; return $1; }, args: [], source: "rootContext\x0a\x09^ rootContext", messageSends: [], referencedClasses: [] }), globals.HLDebuggerModel);
// stepOver -- single-steps the debugger, refreshes, announces HLDebuggerStepped.
smalltalk.addMethod( smalltalk.method({ selector: "stepOver", protocol: 'actions', fn: function (){ var self=this; function $HLDebuggerStepped(){return globals.HLDebuggerStepped||(typeof HLDebuggerStepped=="undefined"?nil:HLDebuggerStepped)} return smalltalk.withContext(function($ctx1) { var $1,$2; _st(self._debugger())._stepOver(); self._onStep(); $1=_st($HLDebuggerStepped())._new(); _st($1)._context_(self._currentContext()); $2=_st($1)._yourself(); _st(self._announcer())._announce_($2); return self}, function($ctx1) {$ctx1.fill(self,"stepOver",{},globals.HLDebuggerModel)})}, args: [], source: "stepOver\x0a\x09self debugger stepOver.\x0a\x09self onStep.\x0a\x09\x0a\x09self announcer announce: (HLDebuggerStepped new\x0a\x09\x09context: self currentContext;\x0a\x09\x09yourself)", messageSends: ["stepOver", "debugger", "onStep", "announce:", "announcer", "context:", "new", "currentContext", "yourself"], referencedClasses: ["HLDebuggerStepped"] }), globals.HLDebuggerModel);
// where -- announces HLDebuggerWhere so widgets re-highlight the current position.
smalltalk.addMethod( smalltalk.method({ selector: "where", protocol: 'actions', fn: function (){ var self=this; function $HLDebuggerWhere(){return globals.HLDebuggerWhere||(typeof HLDebuggerWhere=="undefined"?nil:HLDebuggerWhere)} return smalltalk.withContext(function($ctx1) { _st(self._announcer())._announce_(_st($HLDebuggerWhere())._new()); return self}, function($ctx1) {$ctx1.fill(self,"where",{},globals.HLDebuggerModel)})}, args: [], source: "where\x0a\x09self announcer announce: HLDebuggerWhere new", messageSends: ["announce:", "announcer", "new"], referencedClasses: ["HLDebuggerWhere"] }), globals.HLDebuggerModel);
// HLDebuggerModel class >> on: -- instance creation from an error.
smalltalk.addMethod( smalltalk.method({ selector: "on:", protocol: 'instance creation', fn: function (anError){ var self=this; return smalltalk.withContext(function($ctx1) { var $2,$3,$1; $2=self._new(); _st($2)._initializeFromError_(anError); $3=_st($2)._yourself(); $1=$3; return $1; }, function($ctx1) {$ctx1.fill(self,"on:",{anError:anError},globals.HLDebuggerModel.klass)})}, args: ["anError"], source: "on: anError\x0a\x09^ self new\x0a\x09\x09initializeFromError: anError;\x0a\x09\x09yourself", messageSends: ["initializeFromError:", "new", "yourself"], referencedClasses: [] }), globals.HLDebuggerModel.klass);
// ---- HLErrorHandler: asks the user whether to open the debugger on an unhandled error ----
smalltalk.addClass('HLErrorHandler', globals.Object, [], 'Helios-Debugger');
// confirmDebugError: -- shows a Debug/Abandon confirmation; on confirm, debugError: is invoked.
smalltalk.addMethod( smalltalk.method({ selector: "confirmDebugError:", protocol: 'error handling', fn: function (anError){ var self=this; function $HLConfirmationWidget(){return globals.HLConfirmationWidget||(typeof HLConfirmationWidget=="undefined"?nil:HLConfirmationWidget)} return smalltalk.withContext(function($ctx1) { var $1,$2; $1=_st($HLConfirmationWidget())._new(); _st($1)._confirmationString_(_st(anError)._messageText()); _st($1)._actionBlock_((function(){ return smalltalk.withContext(function($ctx2) { return self._debugError_(anError); }, function($ctx2) {$ctx2.fillBlock({},$ctx1,1)})})); _st($1)._cancelButtonLabel_("Abandon"); _st($1)._confirmButtonLabel_("Debug"); $2=_st($1)._show(); return self}, function($ctx1) {$ctx1.fill(self,"confirmDebugError:",{anError:anError},globals.HLErrorHandler)})}, args: ["anError"], source: "confirmDebugError: anError\x0a\x09HLConfirmationWidget new\x0a\x09\x09confirmationString: anError messageText;\x0a\x09\x09actionBlock: [ self debugError: anError ];\x0a\x09\x09cancelButtonLabel: 'Abandon';\x0a\x09\x09confirmButtonLabel: 'Debug';\x0a\x09\x09show", messageSends: ["confirmationString:", "new", "messageText", "actionBlock:", "debugError:", "cancelButtonLabel:", "confirmButtonLabel:", "show"], referencedClasses: ["HLConfirmationWidget"] }), globals.HLErrorHandler);
// debugError: -- opens an HLDebugger tab on the error; any failure in doing so falls back to ConsoleErrorHandler.
smalltalk.addMethod( smalltalk.method({ selector: "debugError:", protocol: 'error handling', fn: function (anError){ var self=this; function $HLDebugger(){return globals.HLDebugger||(typeof HLDebugger=="undefined"?nil:HLDebugger)} function $Error(){return globals.Error||(typeof Error=="undefined"?nil:Error)} function $ConsoleErrorHandler(){return globals.ConsoleErrorHandler||(typeof ConsoleErrorHandler=="undefined"?nil:ConsoleErrorHandler)} return smalltalk.withContext(function($ctx1) { _st((function(){ return smalltalk.withContext(function($ctx2) { return _st(_st($HLDebugger())._on_(anError))._openAsTab(); }, function($ctx2) {$ctx2.fillBlock({},$ctx1,1)})}))._on_do_($Error(),(function(error){ return smalltalk.withContext(function($ctx2) { return _st(_st($ConsoleErrorHandler())._new())._handleError_(error); }, function($ctx2) {$ctx2.fillBlock({error:error},$ctx1,2)})})); return self}, function($ctx1) {$ctx1.fill(self,"debugError:",{anError:anError},globals.HLErrorHandler)})}, args: ["anError"], source: "debugError: anError\x0a\x0a\x09[ \x0a\x09\x09(HLDebugger on: anError) openAsTab \x0a\x09] \x0a\x09\x09on: Error \x0a\x09\x09do: [ :error | ConsoleErrorHandler new handleError: error ]", messageSends: ["on:do:", "openAsTab", "on:", "handleError:", "new"], referencedClasses: ["HLDebugger", "Error", "ConsoleErrorHandler"] }), globals.HLErrorHandler);
// handleError: -- entry point: delegates to the confirmation dialog.
smalltalk.addMethod( smalltalk.method({ selector: "handleError:", protocol: 'error handling', fn: function (anError){ var self=this; return smalltalk.withContext(function($ctx1) { self._confirmDebugError_(anError); return self}, function($ctx1) {$ctx1.fill(self,"handleError:",{anError:anError},globals.HLErrorHandler)})}, args: ["anError"], source: "handleError: anError\x0a\x09self confirmDebugError: anError", messageSends: ["confirmDebugError:"], referencedClasses: [] }), globals.HLErrorHandler);
// onErrorHandled -- flushes and removes the (asynchronous) progress widget once an error is handled.
smalltalk.addMethod( smalltalk.method({ selector: "onErrorHandled", protocol: 'error handling', fn: function (){ var self=this; function $HLProgressWidget(){return globals.HLProgressWidget||(typeof HLProgressWidget=="undefined"?nil:HLProgressWidget)} return smalltalk.withContext(function($ctx1) { var $1,$2; $1=_st($HLProgressWidget())._default(); _st($1)._flush(); $2=_st($1)._remove(); return self}, function($ctx1) {$ctx1.fill(self,"onErrorHandled",{},globals.HLErrorHandler)})}, args: [], source: "onErrorHandled\x0a\x09\x22when an error is handled, we need to make sure that\x0a\x09any progress bar widget gets removed. Because HLProgressBarWidget is asynchronous,\x0a\x09it has to be done here.\x22\x0a\x09\x0a\x09HLProgressWidget default \x0a\x09\x09flush; \x0a\x09\x09remove", messageSends: ["flush", "default", "remove"], referencedClasses: ["HLProgressWidget"] }), globals.HLErrorHandler);
// ---- HLStackListWidget: the 'Call stack' list with Restart/Where/Step over/Proceed buttons ----
smalltalk.addClass('HLStackListWidget', globals.HLToolListWidget, [], 'Helios-Debugger');
// items -- the model's context stack.
smalltalk.addMethod( smalltalk.method({ selector: "items", protocol: 'accessing', fn: function (){ var self=this; return smalltalk.withContext(function($ctx1) { var $1; $1=_st(self._model())._contexts(); return $1; }, function($ctx1) {$ctx1.fill(self,"items",{},globals.HLStackListWidget)})}, args: [], source: "items\x0a\x09^ self model contexts", messageSends: ["contexts", "model"], referencedClasses: [] }), globals.HLStackListWidget);
// label -- constant widget title.
smalltalk.addMethod( smalltalk.method({ selector: "label", protocol: 'accessing', fn: function (){ var self=this; return "Call stack"; }, args: [], source: "label\x0a\x09^ 'Call stack'", messageSends: [], referencedClasses: [] }), globals.HLStackListWidget);
// observeModel -- additionally refreshes the list on HLDebuggerStepped announcements.
smalltalk.addMethod( smalltalk.method({ selector: "observeModel", protocol: 'actions', fn: function (){ var self=this; function $HLDebuggerStepped(){return globals.HLDebuggerStepped||(typeof HLDebuggerStepped=="undefined"?nil:HLDebuggerStepped)} return smalltalk.withContext(function($ctx1) { ($ctx1.supercall = true, globals.HLStackListWidget.superclass.fn.prototype._observeModel.apply(_st(self), [])); $ctx1.supercall = false; _st(_st(self._model())._announcer())._on_send_to_($HLDebuggerStepped(),"onDebuggerStepped:",self); return self}, function($ctx1) {$ctx1.fill(self,"observeModel",{},globals.HLStackListWidget)})}, args: [], source: "observeModel\x0a\x09super observeModel.\x0a\x09\x0a\x09self model announcer \x0a\x09\x09on: HLDebuggerStepped\x0a\x09\x09send: #onDebuggerStepped:\x0a\x09\x09to: self", messageSends: ["observeModel", "on:send:to:", "announcer", "model"], referencedClasses: ["HLDebuggerStepped"] }), globals.HLStackListWidget);
// onDebuggerStepped: -- invalidates the cached items and redraws.
smalltalk.addMethod( smalltalk.method({ selector: "onDebuggerStepped:", protocol: 'reactions', fn: function (anAnnouncement){ var self=this; return smalltalk.withContext(function($ctx1) { self["@items"]=nil; self._refresh(); return self}, function($ctx1) {$ctx1.fill(self,"onDebuggerStepped:",{anAnnouncement:anAnnouncement},globals.HLStackListWidget)})}, args: ["anAnnouncement"], source: "onDebuggerStepped: anAnnouncement\x0a\x09items := nil.\x0a\x09self refresh", messageSends: ["refresh"], referencedClasses: [] }), globals.HLStackListWidget);
// proceed -- forwards to the model.
smalltalk.addMethod( smalltalk.method({ selector: "proceed", protocol: 'actions', fn: function (){ var self=this; return smalltalk.withContext(function($ctx1) { _st(self._model())._proceed(); return self}, function($ctx1) {$ctx1.fill(self,"proceed",{},globals.HLStackListWidget)})}, args: [], source: "proceed\x0a\x09self model proceed", messageSends: ["proceed", "model"], referencedClasses: [] }), globals.HLStackListWidget);
// renderButtonsOn: -- renders the debugger_bar with Restart / Where / Step over / Proceed buttons.
smalltalk.addMethod( smalltalk.method({ selector: "renderButtonsOn:", protocol: 'rendering', fn: function (html){ var self=this; return smalltalk.withContext(function($ctx1) { var $1,$3,$4,$5,$6,$7,$8,$9,$10,$2; $1=_st(html)._div(); _st($1)._class_("debugger_bar"); $ctx1.sendIdx["class:"]=1; $2=_st($1)._with_((function(){ return smalltalk.withContext(function($ctx2) { $3=_st(html)._button(); $ctx2.sendIdx["button"]=1; _st($3)._class_("btn restart"); $ctx2.sendIdx["class:"]=2; _st($3)._with_("Restart"); $ctx2.sendIdx["with:"]=2; $4=_st($3)._onClick_((function(){ return smalltalk.withContext(function($ctx3) { return self._restart(); }, function($ctx3) {$ctx3.fillBlock({},$ctx2,2)})})); $ctx2.sendIdx["onClick:"]=1; $4; $5=_st(html)._button(); $ctx2.sendIdx["button"]=2; _st($5)._class_("btn where"); $ctx2.sendIdx["class:"]=3; _st($5)._with_("Where"); $ctx2.sendIdx["with:"]=3; $6=_st($5)._onClick_((function(){ return smalltalk.withContext(function($ctx3) { return self._where(); }, function($ctx3) {$ctx3.fillBlock({},$ctx2,3)})})); $ctx2.sendIdx["onClick:"]=2; $6; $7=_st(html)._button(); $ctx2.sendIdx["button"]=3; _st($7)._class_("btn stepOver"); $ctx2.sendIdx["class:"]=4; _st($7)._with_("Step over"); $ctx2.sendIdx["with:"]=4; $8=_st($7)._onClick_((function(){ return smalltalk.withContext(function($ctx3) { return self._stepOver(); }, function($ctx3) {$ctx3.fillBlock({},$ctx2,4)})})); $ctx2.sendIdx["onClick:"]=3; $8; $9=_st(html)._button(); _st($9)._class_("btn proceed"); _st($9)._with_("Proceed"); $10=_st($9)._onClick_((function(){ return smalltalk.withContext(function($ctx3) { return self._proceed(); }, function($ctx3) {$ctx3.fillBlock({},$ctx2,5)})})); return $10; }, function($ctx2) {$ctx2.fillBlock({},$ctx1,1)})})); $ctx1.sendIdx["with:"]=1; return self}, function($ctx1) {$ctx1.fill(self,"renderButtonsOn:",{html:html},globals.HLStackListWidget)})}, args: ["html"], source: "renderButtonsOn: html\x0a\x09html div \x0a\x09\x09class: 'debugger_bar'; \x0a\x09\x09with: [\x0a\x09\x09\x09html button \x0a\x09\x09\x09\x09class: 'btn restart';\x0a\x09\x09\x09\x09with: 'Restart';\x0a\x09\x09\x09\x09onClick: [ self restart ].\x0a\x09\x09\x09html button \x0a\x09\x09\x09\x09class: 'btn where';\x0a\x09\x09\x09\x09with: 'Where';\x0a\x09\x09\x09\x09onClick: [ self where ].\x0a\x09\x09\x09html button \x0a\x09\x09\x09\x09class: 'btn stepOver';\x0a\x09\x09\x09\x09with: 'Step over';\x0a\x09\x09\x09\x09onClick: [ self stepOver ].\x0a\x09\x09\x09html button \x0a\x09\x09\x09\x09class: 'btn proceed';\x0a\x09\x09\x09\x09with: 'Proceed';\x0a\x09\x09\x09\x09onClick: [ self proceed ] ]", messageSends: ["class:", "div", "with:", "button", "onClick:", "restart", "where", "stepOver", "proceed"], referencedClasses: [] }), globals.HLStackListWidget);
smalltalk.addMethod( smalltalk.method({ selector: "restart", protocol: 'actions', fn: function (){ var self=this; return smalltalk.withContext(function($ctx1) { _st(self._model())._restart(); return self}, function($ctx1) {$ctx1.fill(self,"restart",{},globals.HLStackListWidget)})}, args: [], source: "restart\x0a\x09self model restart", messageSends: ["restart", "model"], referencedClasses: [] }), globals.HLStackListWidget); smalltalk.addMethod( smalltalk.method({ selector: "selectItem:", protocol: 'actions', fn: function (aContext){ var self=this; return smalltalk.withContext(function($ctx1) { _st(self._model())._currentContext_(aContext); ($ctx1.supercall = true, globals.HLStackListWidget.superclass.fn.prototype._selectItem_.apply(_st(self), [aContext])); $ctx1.supercall = false; return self}, function($ctx1) {$ctx1.fill(self,"selectItem:",{aContext:aContext},globals.HLStackListWidget)})}, args: ["aContext"], source: "selectItem: aContext\x0a \x09self model currentContext: aContext.\x0a\x09super selectItem: aContext", messageSends: ["currentContext:", "model", "selectItem:"], referencedClasses: [] }), globals.HLStackListWidget); smalltalk.addMethod( smalltalk.method({ selector: "selectedItem", protocol: 'actions', fn: function (){ var self=this; return smalltalk.withContext(function($ctx1) { var $1; $1=_st(self._model())._currentContext(); return $1; }, function($ctx1) {$ctx1.fill(self,"selectedItem",{},globals.HLStackListWidget)})}, args: [], source: "selectedItem\x0a \x09^ self model currentContext", messageSends: ["currentContext", "model"], referencedClasses: [] }), globals.HLStackListWidget); smalltalk.addMethod( smalltalk.method({ selector: "stepOver", protocol: 'actions', fn: function (){ var self=this; return smalltalk.withContext(function($ctx1) { _st(self._model())._stepOver(); return self}, function($ctx1) {$ctx1.fill(self,"stepOver",{},globals.HLStackListWidget)})}, args: [], source: "stepOver\x0a\x09self model stepOver", messageSends: ["stepOver", 
"model"], referencedClasses: [] }), globals.HLStackListWidget); smalltalk.addMethod( smalltalk.method({ selector: "where", protocol: 'actions', fn: function (){ var self=this; return smalltalk.withContext(function($ctx1) { _st(self._model())._where(); return self}, function($ctx1) {$ctx1.fill(self,"where",{},globals.HLStackListWidget)})}, args: [], source: "where\x0a\x09self model where", messageSends: ["where", "model"], referencedClasses: [] }), globals.HLStackListWidget); });<|fim▁end|>
<|file_name|>parameters_io.hpp<|end_file_name|><|fim▁begin|>#ifndef OSRM_TEST_SERVER_PARAMETERS_IO #define OSRM_TEST_SERVER_PARAMETERS_IO #include "engine/api/route_parameters.hpp" #include "engine/approach.hpp" #include "engine/bearing.hpp" #include <ostream> namespace osrm { namespace engine { namespace api { inline std::ostream &operator<<(std::ostream &out, api::RouteParameters::GeometriesType geometries) { switch (geometries) { case api::RouteParameters::GeometriesType::GeoJSON: out << "GeoJSON"; break; case api::RouteParameters::GeometriesType::Polyline: out << "Polyline"; break; default: BOOST_ASSERT_MSG(false, "GeometriesType not fully captured"); } return out; } inline std::ostream &operator<<(std::ostream &out, api::RouteParameters::OverviewType overview) { switch (overview) { case api::RouteParameters::OverviewType::False: out << "False"; break; case api::RouteParameters::OverviewType::Full: out << "Full"; break; case api::RouteParameters::OverviewType::Simplified: out << "Simplified"; break; default: BOOST_ASSERT_MSG(false, "OverviewType not fully captured"); } return out; } } inline std::ostream &operator<<(std::ostream &out, Bearing bearing) { out << bearing.bearing << "," << bearing.range;<|fim▁hole|>inline std::ostream &operator<<(std::ostream &out, Approach approach) { out << static_cast<int>(approach); return out; } } } #endif<|fim▁end|>
return out; }
<|file_name|>_HttpClient.js<|end_file_name|><|fim▁begin|>/* * Copyright 2014 Fulup Ar Foll. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software<|fim▁hole|> * limitations under the License. * * This modele is used for HTTP like divices that do not rely on TCP session * typical for phone application as CellTrackGTS and others. */ 'use strict'; var Debug = require("./_Debug"); var TrackerCmd= require("../lib/_TrackerCmd"); // small object to keep track of last position in ram function PositionObj (data) { this.msg = parseInt (data.cmd); this.lat = parseFloat(data.lat); this.lon = parseFloat(data.lon); this.sog = parseFloat(data.sog) || 0; this.cog = parseFloat(data.cog) || 0; this.alt = parseFloat(data.alt) || 0; this.moved = parseInt(data.moved) || -1; this.elapsed= parseInt(data.elapsed) || -1; this.valid = parseInt(+data.valid); this.acquired_at = data.acquired_at; this.gpsdate= data.date; } // called from http class of adapter function GpsdHttpClient (adapter, devid) { this.debug = adapter.debug; // inherit debug level this.uid = "httpclient//" + adapter.info + ":" + devid; this.adapter = adapter; this.gateway = adapter.gateway; this.controller = adapter.controller; this.socket = null; // we cannot rely on socket to talk to device this.devid = false; // we get uid directly from device this.name = false; this.logged = false; this.alarm = 0; // count alarm messages this.count = 0; // generic counter used by file backend this.errorcount = 0; // number of ignore messages this.uid = "httpclient://" + this.adapter.info + ":" + this.adapter.id; }; // Import debug method GpsdHttpClient.prototype.Debug = Debug; // This method is fast but very approximative for close points // User may expect 50% of error for distance of few 100m // 
nevertheless this is more than enough to optimize storage. GpsdHttpClient.prototype.Distance = function (old, now) { var R = 6371; // Radius of the earth in km var dLat = (now.lat - old.lat) * Math.PI / 180; // deg2rad below var dLon = (now.lon - old.lon) * Math.PI / 180; var a = 0.5 - Math.cos(dLat)/2 + Math.cos(old.lat * Math.PI / 180) * Math.cos(now.lat * Math.PI / 180) * (1 - Math.cos(dLon))/2; var d= R * 2 * Math.asin(Math.sqrt(a)); d= Math.round (d*1000); this.Debug (7, "Distance devid:%s [%s] moved %dm", this.devid, this.name, d); return (d); // return distance in meters }; GpsdHttpClient.prototype.DummyName = function (devid) { var devname = devid.toString(); return devname.substring(devname.length-8); }; GpsdHttpClient.prototype.LoginDev = function(data) { // make code simpler to read var adapter = this.adapter; var controller= adapter.controller; var gateway = adapter.controller.gateway; gateway.event.emit ("notice", "LOGIN_REQUEST", data.devid, this.uid, ""); // if we not logged do it now if (this.logged === false) { this.devid = data.devid; this.class = controller.adapter.info; //Update/Create device socket store by uid at gateway level gateway.activeClients [this.devid] = this; //Propose a fake name in case nothing exist var emeifix = this.DummyName (this.devid); this.callsign = "FX-" + emeifix; this.model = this.devid; if (!data.name) this.name = this.adapter.id + "-" + emeifix; else this.name = data.name; // ask backend to authenticate device and eventfully to change logged state to true gateway.backend.LoginDev (this); } }; GpsdHttpClient.prototype.LogoutDev = function() { var gateway = this.adapter.controller.gateway; if (this.logged) { delete gateway.activeClients [this.devid]; gateway.backend.LogoutDev (this); } }; // Action depending on data parsed by the adapter GpsdHttpClient.prototype.ProcessData = function(data) { // make code simpler to read var adapter = this.adapter; var controller= adapter.controller; var gateway = 
adapter.controller.gateway; // update lastshow to cleanup crom this.lastshow= new Date().getTime(); switch (data.cmd) { // This device is not register inside GpsdHttpClient Object case TrackerCmd.GetFrom.LOGIN: this.LoginDev (data); break; // Device keep alive service case TrackerCmd.GetFrom.PING: break; // Standard tracking information case TrackerCmd.GetFrom.TRACK : var update = true; // default is do the update data.acquired_at = new Date().getTime(); // compute distance only update backend is distance is greater than xxxm if (this.stamp !== undefined) { var moved = parseInt (this.Distance (this.stamp, data)); // compute elapsed time since last update var elapsed = parseInt((data.acquired_at - this.stamp.acquired_at)/1000) ; // in seconds var speedms = parseInt (moved/elapsed); // NEED TO BE KNOWN: with short tic speed is quicky overestimated by 100% !!! // usefull human readable info for control console data.moved = moved; data.elapsed = elapsed; // if moved less than mindist or faster than maxspeed check maxtime value if (moved < this.controller.svcopts.mindist) { this.Debug(2,"%d Dev=%s Data ignored moved %dm<%dm ?", this.errorcount, this.devid, moved, this.controller.svcopts.mindist); // should we force a DB update because maxtime ? 
if (elapsed < this.controller.svcopts.maxtime) update = false; } // if moved less than mindist or faster than maxspeed check maxtime value if (speedms > this.controller.svcopts.maxspeed) { this.Debug(2,"%d Dev %s Data ignored speed %dm/s >%dm/s ?", this.errorcount, this.devid, speedms, this.controller.svcopts.maxspeed); // we only ignore maxErrorCount message, then we restart data acquisition if (this.errorcount++ < this.controller.svcopts.maxerrors) update = false; } } else { data.moved = 0; data.elapsed = 0; } // update database and store current device location in object for mindist computation if (update) { // update device last position in Ram/Database this.stamp = new PositionObj(data); gateway.backend.UpdatePosDev (this); } else { this.Debug(6,"%s Dev=%s ignored moved %dm<%dm ?", this.count, this.devid, moved, this.controller.svcopts.mindist); this.gateway.backend.IgnorePosDev (this); } break; default: this.Debug(2, "Notice: [%s] Unknown command=[%s] Ignored", this.uid, data.cmd); return; break; } // end switch gateway.event.emit ("accept", this, data); this.Debug (5, "Devid:[%s] Name:[%s] Cmd:[%s] Lat:%d Lon:%d Date:%s Logged=%s", this.devid, this.name, data.cmd, data.lat, data.lon, data.date, this.logged ); }; // Only LOGOUT command make sence with a TcpFeed GpsdHttpClient.prototype.RequestAction = function(command,args){ // send command to adapter & backend var status = this.adapter.SendCommand (this,command,args); if (status !== 0) { this.gateway.event.emit ("notice", "UNSUP_CMD", command, this.adapter.uid); } return(status); }; module.exports = GpsdHttpClient;<|fim▁end|>
* distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and
<|file_name|>02-list-servers.go<|end_file_name|><|fim▁begin|>package main import ( "flag"<|fim▁hole|>) var quiet = flag.Bool("quiet", false, "Quiet mode, for acceptance testing. $? still indicates errors though.") func main() { flag.Parse() withIdentity(false, func(acc gophercloud.AccessProvider) { withServerApi(acc, func(api gophercloud.CloudServersProvider) { tryFullDetails(api) tryLinksOnly(api) }) }) } func tryLinksOnly(api gophercloud.CloudServersProvider) { servers, err := api.ListServersLinksOnly() if err != nil { panic(err) } if !*quiet { fmt.Println("Id,Name") for _, s := range servers { if s.AccessIPv4 != "" { panic("IPv4 not expected") } if s.Status != "" { panic("Status not expected") } if s.Progress != 0 { panic("Progress not expected") } fmt.Printf("%s,\"%s\"\n", s.Id, s.Name) } } } func tryFullDetails(api gophercloud.CloudServersProvider) { servers, err := api.ListServers() if err != nil { panic(err) } if !*quiet { fmt.Println("Id,Name,AccessIPv4,Status,Progress") for _, s := range servers { fmt.Printf("%s,\"%s\",%s,%s,%d\n", s.Id, s.Name, s.AccessIPv4, s.Status, s.Progress) } } }<|fim▁end|>
"fmt" "github.com/rackspace/gophercloud"
<|file_name|>load_test.py<|end_file_name|><|fim▁begin|>import re from threading import Thread import time from django.core.management.base import BaseCommand import requests from mittab.apps.tab.models import Round, TabSettings from mittab.apps.tab.management.commands import utils class Command(BaseCommand): help = "Load test the tournament, connecting via localhost and hitting the server" def add_arguments(self, parser): parser.add_argument( "--host", dest="host", help="The hostname of the server to hit", nargs="?", default="localhost:8000") parser.add_argument( "--connections", dest="connections", help="The number of concurrent connections to open", nargs="?", default=10, type=int) def handle(self, *args, **options): cur_round = TabSettings.get("cur_round") - 1 host = options["host"] csrf_threads = [] rounds = Round.objects.filter(round_number=cur_round, victor=Round.NONE) for round_obj in rounds: judge = round_obj.chair csrf_threads.append(GetCsrfThread(host, judge.ballot_code, round_obj)) num_errors = 0 while csrf_threads: cur_csrf_threads = [] for _ in range(min(len(csrf_threads), options["connections"])): cur_csrf_threads.append(csrf_threads.pop()) for thr in cur_csrf_threads: thr.start() for thr in cur_csrf_threads: thr.join() result_threads = [] for thr in cur_csrf_threads: num_errors += num_errors csrf_token, num_errors = thr.result if csrf_token is None: print("no csrf token") <|fim▁hole|> thr.round_obj) result_threads.append(result_thread) for thr in result_threads: thr.start() for thr in result_threads: thr.join() for thr in result_threads: num_errors += thr.num_errors print("Done with one batch! 
Sleeping!") time.sleep(2) print("Done!") print("Total errors: %s" % num_errors) class SubmitResultThread(Thread): MAX_ERRORS = 10 def __init__(self, host, ballot_code, csrf_token, round_obj): super(SubmitResultThread, self).__init__() self.host = host self.ballot_code = ballot_code self.csrf_token = csrf_token self.round_obj = round_obj self.num_errors = 0 self.resp = None def run(self): self.resp = self.get_resp() def get_resp(self): if self.num_errors >= self.MAX_ERRORS: return None result = utils.generate_random_results(self.round_obj, self.ballot_code) result["csrfmiddlewaretoken"] = self.csrf_token resp = requests.post("http://%s/e_ballots/%s/" % (self.host, self.ballot_code), result, cookies={"csrftoken": self.csrf_token}) if resp.status_code > 299: self.num_errors += 1 return self.get_resp() else: return resp.text class GetCsrfThread(Thread): REGEX = "name=\"csrfmiddlewaretoken\" value=\"([^\"]+)\"" MAX_ERRORS = 10 def __init__(self, host, ballot_code, round_obj): super(GetCsrfThread, self).__init__() self.num_errors = 0 self.host = host self.ballot_code = ballot_code self.round_obj = round_obj self.result = (None, None) def run(self): resp = self.get_resp() if resp is None: self.result = (None, self.num_errors) else: csrf = re.search(self.REGEX, resp).group(1) self.result = (csrf, self.num_errors) def get_resp(self): if self.num_errors >= self.MAX_ERRORS: return None resp = requests.get("http://%s/e_ballots/%s" % (self.host, self.ballot_code)) if resp.status_code > 299: self.num_errors += 1 return self.get_resp() else: return resp.text<|fim▁end|>
result_thread = SubmitResultThread( thr.host, thr.ballot_code, csrf_token,
<|file_name|>azure_rm_mariadbdatabase_info.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # # Copyright (c) 2017 Zim Kalinowski, <[email protected]> # Copyright (c) 2019 Matti Ranta, (@techknowlogick) # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_mariadbdatabase_info version_added: "2.9" short_description: Get Azure MariaDB Database facts description: - Get facts of MariaDB Database. options: resource_group: description: - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. required: True type: str server_name: description: - The name of the server. required: True type: str name: description: - The name of the database. type: str extends_documentation_fragment: - azure author: - Zim Kalinowski (@zikalino) - Matti Ranta (@techknowlogick) ''' EXAMPLES = ''' - name: Get instance of MariaDB Database azure_rm_mariadbdatabase_info: resource_group: myResourceGroup server_name: server_name name: database_name - name: List instances of MariaDB Database azure_rm_mariadbdatabase_info: resource_group: myResourceGroup server_name: server_name '''<|fim▁hole|>RETURN = ''' databases: description: - A list of dictionaries containing facts for MariaDB Databases. returned: always type: complex contains: id: description: - Resource ID. returned: always type: str sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testser ver/databases/db1" resource_group: description: - Resource group name. returned: always type: str sample: testrg server_name: description: - Server name. 
returned: always type: str sample: testserver name: description: - Resource name. returned: always type: str sample: db1 charset: description: - The charset of the database. returned: always type: str sample: UTF8 collation: description: - The collation of the database. returned: always type: str sample: English_United States.1252 ''' from ansible.module_utils.azure_rm_common import AzureRMModuleBase try: from msrestazure.azure_exceptions import CloudError from azure.mgmt.rdbms.mariadb import MariaDBManagementClient from msrest.serialization import Model except ImportError: # This is handled in azure_rm_common pass class AzureRMMariaDbDatabaseInfo(AzureRMModuleBase): def __init__(self): # define user inputs into argument self.module_arg_spec = dict( resource_group=dict( type='str', required=True ), server_name=dict( type='str', required=True ), name=dict( type='str' ) ) # store the results of the module operation self.results = dict( changed=False ) self.resource_group = None self.server_name = None self.name = None super(AzureRMMariaDbDatabaseInfo, self).__init__(self.module_arg_spec, supports_tags=False) def exec_module(self, **kwargs): is_old_facts = self.module._name == 'azure_rm_mariadbdatabase_facts' if is_old_facts: self.module.deprecate("The 'azure_rm_mariadbdatabase_facts' module has been renamed to 'azure_rm_mariadbdatabase_info'", version='2.13') for key in self.module_arg_spec: setattr(self, key, kwargs[key]) if (self.resource_group is not None and self.server_name is not None and self.name is not None): self.results['databases'] = self.get() elif (self.resource_group is not None and self.server_name is not None): self.results['databases'] = self.list_by_server() return self.results def get(self): response = None results = [] try: response = self.mariadb_client.databases.get(resource_group_name=self.resource_group, server_name=self.server_name, database_name=self.name) self.log("Response : {0}".format(response)) except CloudError as e: self.log('Could 
not get facts for Databases.') if response is not None: results.append(self.format_item(response)) return results def list_by_server(self): response = None results = [] try: response = self.mariadb_client.databases.list_by_server(resource_group_name=self.resource_group, server_name=self.server_name) self.log("Response : {0}".format(response)) except CloudError as e: self.fail("Error listing for server {0} - {1}".format(self.server_name, str(e))) if response is not None: for item in response: results.append(self.format_item(item)) return results def format_item(self, item): d = item.as_dict() d = { 'resource_group': self.resource_group, 'server_name': self.server_name, 'name': d['name'], 'charset': d['charset'], 'collation': d['collation'] } return d def main(): AzureRMMariaDbDatabaseInfo() if __name__ == '__main__': main()<|fim▁end|>
<|file_name|>views.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import OrderedDict from django.urls import reverse from django.urls import reverse_lazy from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon import forms from horizon import messages from horizon import tables from horizon import tabs from horizon.utils import memoized from openstack_dashboard import api from openstack_dashboard.dashboards.admin.rbac_policies \ import forms as rbac_policy_forms from openstack_dashboard.dashboards.admin.rbac_policies \ import tables as rbac_policy_tables from openstack_dashboard.dashboards.admin.rbac_policies \ import tabs as rbac_policy_tabs class IndexView(tables.DataTableView): table_class = rbac_policy_tables.RBACPoliciesTable page_title = _("RBAC Policies") @memoized.memoized_method def _get_tenants(self): try: tenants, has_more = api.keystone.tenant_list(self.request) except Exception: tenants = [] msg = _("Unable to retrieve information about the " "policies' projects.") exceptions.handle(self.request, msg) tenant_dict = OrderedDict([(t.id, t.name) for t in tenants]) return tenant_dict def _get_networks(self): try: networks = api.neutron.network_list(self.request) except Exception: networks = [] msg = _("Unable to retrieve information about the " "policies' networks.") exceptions.handle(self.request, msg) return dict((n.id, n.name) for n in networks) def _get_qos_policies(self): 
qos_policies = [] try: if api.neutron.is_extension_supported(self.request, extension_alias='qos'): qos_policies = api.neutron.policy_list(self.request) except Exception: msg = _("Unable to retrieve information about the " "policies' qos policies.") exceptions.handle(self.request, msg) return dict((q.id, q.name) for q in qos_policies) def get_data(self): try: rbac_policies = api.neutron.rbac_policy_list(self.request) except Exception: rbac_policies = [] messages.error(self.request, _("Unable to retrieve RBAC policies.")) if rbac_policies: tenant_dict = self._get_tenants() network_dict = self._get_networks() qos_policy_dict = self._get_qos_policies() for p in rbac_policies: # Set tenant name and object name p.tenant_name = tenant_dict.get(p.tenant_id, p.tenant_id) p.target_tenant_name = tenant_dict.get(p.target_tenant, p.target_tenant) if p.object_type == "network": p.object_name = network_dict.get(p.object_id, p.object_id) elif p.object_type == "qos_policy": p.object_name = qos_policy_dict.get(p.object_id, p.object_id) return rbac_policies class CreateView(forms.ModalFormView): template_name = 'admin/rbac_policies/create.html' form_id = "create_rbac_policy_form" form_class = rbac_policy_forms.CreatePolicyForm submit_label = _("Create RBAC Policy") submit_url = reverse_lazy("horizon:admin:rbac_policies:create") success_url = reverse_lazy("horizon:admin:rbac_policies:index") page_title = _("Create A RBAC Policy") class UpdateView(forms.ModalFormView): context_object_name = 'rbac_policies' template_name = 'admin/rbac_policies/update.html' form_class = rbac_policy_forms.UpdatePolicyForm form_id = "update_rbac_policy_form" submit_label = _("Save Changes")<|fim▁hole|> def get_context_data(self, **kwargs): context = super(UpdateView, self).get_context_data(**kwargs) args = (self.kwargs['rbac_policy_id'],) context["rbac_policy_id"] = self.kwargs['rbac_policy_id'] context["submit_url"] = reverse(self.submit_url, args=args) return context @memoized.memoized_method def 
_get_object(self, *args, **kwargs): rbac_policy_id = self.kwargs['rbac_policy_id'] try: return api.neutron.rbac_policy_get(self.request, rbac_policy_id) except Exception: redirect = self.success_url msg = _('Unable to retrieve rbac policy details.') exceptions.handle(self.request, msg, redirect=redirect) def get_initial(self): rbac_policy = self._get_object() return {'rbac_policy_id': rbac_policy['id'], 'target_tenant': rbac_policy['target_tenant']} class DetailView(tabs.TabView): tab_group_class = rbac_policy_tabs.RBACDetailsTabs template_name = 'horizon/common/_detail.html' page_title = "{{ rbac_policy.id }}"<|fim▁end|>
submit_url = 'horizon:admin:rbac_policies:update' success_url = reverse_lazy('horizon:admin:rbac_policies:index') page_title = _("Update RBAC Policy")
<|file_name|>views.py<|end_file_name|><|fim▁begin|>""" Definition of views. """ from app.models import Choice, Poll from datetime import datetime from django.contrib.auth.decorators import login_required from django.core.urlresolvers import reverse from django.http import HttpRequest, HttpResponseRedirect from django.shortcuts import get_object_or_404, render from django.template import RequestContext from django.utils import timezone from django.views.generic import ListView, DetailView from os import path import json <|fim▁hole|> model = Poll def get_context_data(self, **kwargs): context = super(PollListView, self).get_context_data(**kwargs) context['title'] = 'Polls' context['year'] = datetime.now().year return context class PollDetailView(DetailView): """Renders the poll details page.""" model = Poll def get_context_data(self, **kwargs): context = super(PollDetailView, self).get_context_data(**kwargs) context['title'] = 'Poll' context['year'] = datetime.now().year return context class PollResultsView(DetailView): """Renders the results page.""" model = Poll def get_context_data(self, **kwargs): context = super(PollResultsView, self).get_context_data(**kwargs) context['title'] = 'Results' context['year'] = datetime.now().year return context def contact(request): """Renders the contact page.""" assert isinstance(request, HttpRequest) return render( request, 'app/contact.html', context_instance = RequestContext(request, { 'title': 'Contact', 'message': 'Your contact page.', 'year': datetime.now().year, }) ) def about(request): """Renders the about page.""" assert isinstance(request, HttpRequest) return render( request, 'app/about.html', context_instance = RequestContext(request, { 'title': 'About', 'message': 'Your application description page.', 'year': datetime.now().year, }) ) def vote(request, poll_id): """Handles voting. 
Validates input and updates the repository.""" poll = get_object_or_404(Poll, pk=poll_id) try: selected_choice = poll.choice_set.get(pk=request.POST['choice']) except (KeyError, Choice.DoesNotExist): return render(request, 'app/details.html', { 'title': 'Poll', 'year': datetime.now().year, 'poll': poll, 'error_message': "Please make a selection.", }) else: selected_choice.votes += 1 selected_choice.save() return HttpResponseRedirect(reverse('app:results', args=(poll.id,))) @login_required def seed(request): """Seeds the database with sample polls.""" samples_path = path.join(path.dirname(__file__), 'samples.json') with open(samples_path, 'r') as samples_file: samples_polls = json.load(samples_file) for sample_poll in samples_polls: poll = Poll() poll.text = sample_poll['text'] poll.pub_date = timezone.now() poll.save() for sample_choice in sample_poll['choices']: choice = Choice() choice.poll = poll choice.text = sample_choice choice.votes = 0 choice.save() return HttpResponseRedirect(reverse('app:home'))<|fim▁end|>
class PollListView(ListView): """Renders the home page, with a list of all polls."""
<|file_name|>app.js<|end_file_name|><|fim▁begin|>"use strict"; /*global templates, ajaxify, utils, bootbox, overrides, socket, config, Visibility*/ var app = app || {}; app.isFocused = true; app.currentRoom = null; app.widgets = {}; app.cacheBuster = null; (function () { var showWelcomeMessage = !!utils.params().loggedin; templates.setGlobal('config', config); app.cacheBuster = config['cache-buster']; bootbox.setDefaults({ locale: config.userLang }); app.load = function() { app.loadProgressiveStylesheet(); var url = ajaxify.start(window.location.pathname.slice(1) + window.location.search + window.location.hash); ajaxify.updateHistory(url, true); ajaxify.end(url, app.template); handleStatusChange(); if (config.searchEnabled) { app.handleSearch(); } $('#content').on('click', '#new_topic', function(){ app.newTopic(); }); require(['components'], function(components) { components.get('user/logout').on('click', app.logout); }); Visibility.change(function(e, state){ if (state === 'visible') { app.isFocused = true; app.alternatingTitle(''); } else if (state === 'hidden') { app.isFocused = false; } }); overrides.overrideBootbox(); overrides.overrideTimeago(); createHeaderTooltips(); app.showEmailConfirmWarning(); socket.removeAllListeners('event:nodebb.ready'); socket.on('event:nodebb.ready', function(data) { if (!app.cacheBuster || app.cacheBuster !== data['cache-buster']) { app.cacheBuster = data['cache-buster']; app.alert({ alert_id: 'forum_updated', title: '[[global:updated.title]]', message: '[[global:updated.message]]', clickfn: function() { window.location.reload();<|fim▁hole|> }, type: 'warning' }); } }); require(['taskbar', 'helpers', 'forum/pagination'], function(taskbar, helpers, pagination) { taskbar.init(); // templates.js helpers helpers.register(); pagination.init(); $(window).trigger('action:app.load'); }); }; app.logout = function() { $.ajax(config.relative_path + '/logout', { type: 'POST', headers: { 'x-csrf-token': config.csrf_token }, success: function() 
{ window.location.href = config.relative_path + '/'; } }); }; app.alert = function (params) { require(['alerts'], function(alerts) { alerts.alert(params); }); }; app.removeAlert = function(id) { require(['alerts'], function(alerts) { alerts.remove(id); }); }; app.alertSuccess = function (message, timeout) { app.alert({ title: '[[global:alert.success]]', message: message, type: 'success', timeout: timeout ? timeout : 5000 }); }; app.alertError = function (message, timeout) { app.alert({ title: '[[global:alert.error]]', message: message, type: 'danger', timeout: timeout ? timeout : 10000 }); }; app.enterRoom = function (room, callback) { callback = callback || function() {}; if (socket && app.user.uid && app.currentRoom !== room) { var previousRoom = app.currentRoom; app.currentRoom = room; socket.emit('meta.rooms.enter', { enter: room }, function(err) { if (err) { app.currentRoom = previousRoom; return app.alertError(err.message); } callback(); }); } }; app.leaveCurrentRoom = function() { if (!socket) { return; } socket.emit('meta.rooms.leaveCurrent', function(err) { if (err) { return app.alertError(err.message); } app.currentRoom = ''; }); }; function highlightNavigationLink() { var path = window.location.pathname; $('#main-nav li').removeClass('active'); if (path) { $('#main-nav li').removeClass('active').find('a[href="' + path + '"]').parent().addClass('active'); } } app.createUserTooltips = function(els) { els = els || $('body'); els.find('.avatar,img[title].teaser-pic,img[title].user-img,div.user-icon,span.user-icon').each(function() { if (!utils.isTouchDevice()) { $(this).tooltip({ placement: 'top', title: $(this).attr('title') }); } }); }; app.createStatusTooltips = function() { if (!utils.isTouchDevice()) { $('body').tooltip({ selector:'.fa-circle.status', placement: 'top' }); } }; app.replaceSelfLinks = function(selector) { selector = selector || $('a'); selector.each(function() { var href = $(this).attr('href'); if (href && app.user.userslug && 
href.indexOf('user/_self_') !== -1) { $(this).attr('href', href.replace(/user\/_self_/g, 'user/' + app.user.userslug)); } }); }; app.processPage = function () { highlightNavigationLink(); $('.timeago').timeago(); utils.makeNumbersHumanReadable($('.human-readable-number')); utils.addCommasToNumbers($('.formatted-number')); app.createUserTooltips(); app.createStatusTooltips(); app.replaceSelfLinks(); // Scroll back to top of page window.scrollTo(0, 0); }; app.showLoginMessage = function () { function showAlert() { app.alert({ type: 'success', title: '[[global:welcome_back]] ' + app.user.username + '!', message: '[[global:you_have_successfully_logged_in]]', timeout: 5000 }); } if (showWelcomeMessage) { showWelcomeMessage = false; if (document.readyState !== 'complete') { $(document).ready(showAlert); } else { showAlert(); } } }; app.openChat = function (roomId) { if (!app.user.uid) { return app.alertError('[[error:not-logged-in]]'); } require(['chat'], function (chat) { function loadAndCenter(chatModal) { chat.load(chatModal.attr('UUID')); chat.center(chatModal); chat.focusInput(chatModal); } if (chat.modalExists(roomId)) { loadAndCenter(chat.getModal(roomId)); } else { socket.emit('modules.chats.loadRoom', {roomId: roomId}, function(err, roomData) { if (err) { return app.alertError(err.message); } roomData.users = roomData.users.filter(function(user) { return user && parseInt(user.uid, 10) !== parseInt(app.user.uid, 10); }); chat.createModal(roomData, loadAndCenter); }); } }); }; app.newChat = function (touid) { if (!app.user.uid) { return app.alertError('[[error:not-logged-in]]'); } socket.emit('modules.chats.newRoom', {touid: touid}, function(err, roomId) { if (err) { return app.alertError(err.message); } if (!ajaxify.currentPage.startsWith('chats')) { app.openChat(roomId); } else { ajaxify.go('chats/' + roomId); } }); }; var titleObj = { active: false, interval: undefined, titles: [] }; app.alternatingTitle = function (title) { if (typeof title !== 'string') { 
return; } if (title.length > 0 && !app.isFocused) { if (!titleObj.titles[0]) { titleObj.titles[0] = window.document.title; } require(['translator'], function(translator) { translator.translate(title, function(translated) { titleObj.titles[1] = translated; if (titleObj.interval) { clearInterval(titleObj.interval); } titleObj.interval = setInterval(function() { var title = titleObj.titles[titleObj.titles.indexOf(window.document.title) ^ 1]; if (title) { window.document.title = $('<div/>').html(title).text(); } }, 2000); }); }); } else { if (titleObj.interval) { clearInterval(titleObj.interval); } if (titleObj.titles[0]) { window.document.title = $('<div/>').html(titleObj.titles[0]).text(); } } }; app.refreshTitle = function(title) { if (!title) { return; } require(['translator'], function(translator) { title = config.titleLayout.replace(/&#123;/g, '{').replace(/&#125;/g, '}') .replace('{pageTitle}', function() { return title; }) .replace('{browserTitle}', function() { return config.browserTitle; }); translator.translate(title, function(translated) { titleObj.titles[0] = translated; app.alternatingTitle(''); }); }); }; app.toggleNavbar = function(state) { var navbarEl = $('.navbar'); if (navbarEl) { navbarEl.toggleClass('hidden', !!!state); } }; function createHeaderTooltips() { var env = utils.findBootstrapEnvironment(); if (env === 'xs' || env === 'sm') { return; } $('#header-menu li a[title]').each(function() { if (!utils.isTouchDevice()) { $(this).tooltip({ placement: 'bottom', trigger: 'hover', title: $(this).attr('title') }); } }); if (!utils.isTouchDevice()) { $('#search-form').parent().tooltip({ placement: 'bottom', trigger: 'hover', title: $('#search-button i').attr('title') }); } if (!utils.isTouchDevice()) { $('#user_dropdown').tooltip({ placement: 'bottom', trigger: 'hover', title: $('#user_dropdown').attr('title') }); } } app.handleSearch = function () { var searchButton = $("#search-button"), searchFields = $("#search-fields"), searchInput = 
$('#search-fields input'); $('#search-form .advanced-search-link').on('mousedown', function() { ajaxify.go('/search'); }); $('#search-form').on('submit', dismissSearch); searchInput.on('blur', dismissSearch); function dismissSearch(){ searchFields.addClass('hidden'); searchButton.removeClass('hidden'); } searchButton.on('click', function(e) { if (!config.loggedIn && !config.allowGuestSearching) { app.alert({ message:'[[error:search-requires-login]]', timeout: 3000 }); ajaxify.go('login'); return false; } e.stopPropagation(); app.prepareSearch(); return false; }); $('#search-form').on('submit', function () { var input = $(this).find('input'); require(['search'], function(search) { search.query({term: input.val()}, function() { input.val(''); }); }); return false; }); }; app.prepareSearch = function() { $("#search-fields").removeClass('hidden'); $("#search-button").addClass('hidden'); $('#search-fields input').focus(); }; function handleStatusChange() { $('[component="header/usercontrol"] [data-status]').off('click').on('click', function(e) { var status = $(this).attr('data-status'); socket.emit('user.setStatus', status, function(err) { if(err) { return app.alertError(err.message); } $('[data-uid="' + app.user.uid + '"] [component="user/status"], [component="header/profilelink"] [component="user/status"]') .removeClass('away online dnd offline') .addClass(status); app.user.status = status; }); e.preventDefault(); }); } app.updateUserStatus = function(el, status) { if (!el.length) { return; } require(['translator'], function(translator) { translator.translate('[[global:' + status + ']]', function(translated) { el.removeClass('online offline dnd away') .addClass(status) .attr('title', translated) .attr('data-original-title', translated); }); }); }; app.newTopic = function (cid) { $(window).trigger('action:composer.topic.new', { cid: cid || ajaxify.data.cid || 0 }); }; app.loadJQueryUI = function(callback) { if (typeof $().autocomplete === 'function') { return 
callback(); } var scriptEl = document.createElement('script'); scriptEl.type = 'text/javascript'; scriptEl.src = config.relative_path + '/vendor/jquery/js/jquery-ui-1.10.4.custom.js' + (app.cacheBuster ? '?v=' + app.cacheBuster : ''); scriptEl.onload = callback; document.head.appendChild(scriptEl); }; app.showEmailConfirmWarning = function(err) { if (!config.requireEmailConfirmation || !app.user.uid) { return; } var msg = { alert_id: 'email_confirm', type: 'warning', timeout: 0 }; if (!app.user.email) { msg.message = '[[error:no-email-to-confirm]]'; msg.clickfn = function() { app.removeAlert('email_confirm'); ajaxify.go('user/' + app.user.userslug + '/edit'); }; app.alert(msg); } else if (!app.user['email:confirmed'] && !app.user.isEmailConfirmSent) { msg.message = err ? err.message : '[[error:email-not-confirmed]]'; msg.clickfn = function() { app.removeAlert('email_confirm'); socket.emit('user.emailConfirm', {}, function(err) { if (err) { return app.alertError(err.message); } app.alertSuccess('[[notifications:email-confirm-sent]]'); }); }; app.alert(msg); } else if (!app.user['email:confirmed'] && app.user.isEmailConfirmSent) { msg.message = '[[error:email-not-confirmed-email-sent]]'; app.alert(msg); } }; app.parseAndTranslate = function(template, blockName, data, callback) { require(['translator'], function(translator) { if (typeof blockName === 'string') { templates.parse(template, blockName, data, function(html) { translator.translate(html, function(translatedHTML) { translatedHTML = translator.unescape(translatedHTML); callback($(translatedHTML)); }); }); } else { callback = data, data = blockName; templates.parse(template, data, function(html) { translator.translate(html, function(translatedHTML) { translatedHTML = translator.unescape(translatedHTML); callback($(translatedHTML)); }); }); } }); }; app.loadProgressiveStylesheet = function() { var linkEl = document.createElement('link'); linkEl.rel = 'stylesheet'; linkEl.href = config.relative_path + 
'/js-enabled.css'; document.head.appendChild(linkEl); }; }());<|fim▁end|>
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. extern crate rand; extern crate regex; extern crate regex_syntax; extern crate quickcheck; use rand::Rng; use rand::distributions::exponential::Exp1; use regex_syntax::{Expr, CharClass, ByteClass, ByteRange, ClassRange, Repeater}; use quickcheck::{Arbitrary, Gen, StdGen, Testable, QuickCheck}; /// TODO: implement unicode support fn randomize_case<G: Rng>(g: &mut G, c: char) -> char { match g.gen_range(0, 3) { 0 => { c } 1 => { if 'a' <= c && c <= 'z' { (c as u8 - 'a' as u8 + 'A' as u8) as char } else { c } } 2 => { if 'A' <= c && c <= 'Z' { (c as u8 - 'A' as u8 + 'a' as u8) as char } else { c } } _ => unreachable!(), } } ///Inclusive of both bounds. 
fn random_char_in_range<G: Rng>(g: &mut G, low: char, high: char) -> char { let (low, high) = (low as u32, high as u32); let l0 = low; let h0 = ::std::cmp::min(high, 0xD7FF); let l1 = ::std::cmp::max(low, 0xE000); let h1 = ::std::cmp::min(high, 0x10FFFF); let s0 = if h0 < l0 {0} else {h0 - l0 + 1}; let s1 = if h1 < l1 {0} else {h1 - l1 + 1}; let x: u32 = if g.gen_range(0, s1 + s0) < s0 { g.gen_range(l0, h0 + 1) } else { g.gen_range(l1, h1 + 1) }; assert!(low <= x && x <= high); ::std::char::from_u32(x).unwrap() } pub fn sample_language<G: Rng>(g: &mut G, acc: &mut Vec<char>, expr: &Expr) { match expr { &Expr::Empty => (), &Expr::Literal { ref chars, casei, } => { if !casei || g.gen() { acc.extend(chars.iter()); } else { acc.extend(chars.iter().map(|&c| randomize_case(g, c))) } } &Expr::LiteralBytes { ref bytes, casei, } => { if !casei || g.gen() { acc.extend(bytes.iter().map(|&x| x as char)); } else { acc.extend(bytes.iter().map(|&x| randomize_case(g, x as char))); } } &Expr::AnyChar => { acc.push(g.gen()); }, &Expr::AnyCharNoNL => { loop { let c = g.gen(); if c != '\n' { acc.push(c); break; }<|fim▁hole|> acc.push((g.gen::<u8>() % 128) as char); }, &Expr::AnyByteNoNL => { loop { let b = g.gen::<u8>() % 128; if b != '\n' as u8 { acc.push(b as char); break; } } }, &Expr::Class(CharClass { ref ranges } ) => { let ClassRange { start, end } = { ranges[g.gen_range(0, ranges.len())] }; acc.push(random_char_in_range(g, start, end)); }, &Expr::ClassBytes(ByteClass { ref ranges }) => { let ByteRange { start, end } = { ranges[g.gen_range(0, ranges.len())] }; acc.push(g.gen_range(start, end + 1) as char); } &Expr::Group { ref e, i: _, name: _ } => { sample_language(g, acc, e.as_ref()); }, &Expr::Repeat { ref e, r, greedy: _, } => { let n = match r { Repeater::ZeroOrOne => g.gen_range(0, 2), Repeater::ZeroOrMore => {let Exp1(x) = g.gen(); x as usize} Repeater::OneOrMore => {let Exp1(x) = g.gen(); x as usize + 1} Repeater::Range { min: a, max: None} => { let Exp1(x) = g.gen(); x 
as usize + a as usize } Repeater::Range { min: a, max: Some(b) } => { g.gen_range(a as usize, b as usize + 1) } }; for _ in 0..n { sample_language(g, acc, e); } }, &Expr::Concat(ref v) => { for e in v.iter() { sample_language(g, acc, e); } }, &Expr::Alternate(ref v) => { let i = g.gen_range(0, v.len()); sample_language(g, acc, &v[i]) }, _ => panic!("zero-width assertions unsupported"), } } fn remove_zero_width(e: &mut Expr) { use regex_syntax::Expr::*; match e { &mut StartLine | &mut EndLine | &mut StartText | &mut EndText | &mut WordBoundary | &mut NotWordBoundary | &mut WordBoundaryAscii | &mut NotWordBoundaryAscii => { *e = AnyChar; } &mut Group { ref mut e, .. } => { remove_zero_width(e.as_mut()); } &mut Repeat { ref mut e, .. } => { remove_zero_width(e.as_mut()); } &mut Concat(ref mut v) => { for e in v.iter_mut() { remove_zero_width(e); } } &mut Alternate(ref mut v) => { for e in v.iter_mut() { remove_zero_width(e); } } _ => (), } } #[derive(Clone, Debug)] struct ExprWithMatching { e: Expr, s: String, } impl Arbitrary for ExprWithMatching { fn arbitrary<G: Gen>(g: &mut G) -> Self { let mut e = Arbitrary::arbitrary(g); remove_zero_width(&mut e); let mut v = Vec::new(); sample_language(g, &mut v, &e); ExprWithMatching { e: e, s: v.iter().map(|&c| c).collect(), } } fn shrink(&self) -> Box<Iterator<Item=Self>> { Box::new(self.e.shrink().map(|e| { let mut e = e.clone(); remove_zero_width(&mut e); let mut v = Vec::new(); let mut g = StdGen::new(rand::thread_rng(), 100); sample_language(&mut g, &mut v, &e); ExprWithMatching { e: e, s: v.iter().map(|&c| c).collect(), } })) } } fn qc<T: Testable>(t: T) { QuickCheck::new() .tests(10_000) .max_tests(20_000) .quickcheck(t); } #[test] fn matching_string_matches() { fn prop(ExprWithMatching {e, s}: ExprWithMatching) -> bool { use regex::bytes::Regex; let re = Regex::new(&e.to_string()).unwrap(); let caps = re.captures(&s.as_bytes()); caps.unwrap().pos(0).unwrap().0 == 0 } qc(prop as fn(ExprWithMatching) -> bool); 
}<|fim▁end|>
} }, &Expr::AnyByte => {
<|file_name|>describe_instance_type_families.go<|end_file_name|><|fim▁begin|>package ecs //Licensed under the Apache License, Version 2.0 (the "License"); //you may not use this file except in compliance with the License. //You may obtain a copy of the License at // //http://www.apache.org/licenses/LICENSE-2.0 // //Unless required by applicable law or agreed to in writing, software //distributed under the License is distributed on an "AS IS" BASIS, //WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //See the License for the specific language governing permissions and //limitations under the License. // // Code generated by Alibaba Cloud SDK Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" ) // DescribeInstanceTypeFamilies invokes the ecs.DescribeInstanceTypeFamilies API synchronously func (client *Client) DescribeInstanceTypeFamilies(request *DescribeInstanceTypeFamiliesRequest) (response *DescribeInstanceTypeFamiliesResponse, err error) { response = CreateDescribeInstanceTypeFamiliesResponse() err = client.DoAction(request, response) return } // DescribeInstanceTypeFamiliesWithChan invokes the ecs.DescribeInstanceTypeFamilies API asynchronously func (client *Client) DescribeInstanceTypeFamiliesWithChan(request *DescribeInstanceTypeFamiliesRequest) (<-chan *DescribeInstanceTypeFamiliesResponse, <-chan error) { responseChan := make(chan *DescribeInstanceTypeFamiliesResponse, 1) errChan := make(chan error, 1) err := client.AddAsyncTask(func() { defer close(responseChan) defer close(errChan) response, err := client.DescribeInstanceTypeFamilies(request) if err != nil { errChan <- err } else { responseChan <- response } }) if err != nil { errChan <- err close(responseChan) close(errChan) } return responseChan, errChan } // DescribeInstanceTypeFamiliesWithCallback invokes the 
ecs.DescribeInstanceTypeFamilies API asynchronously func (client *Client) DescribeInstanceTypeFamiliesWithCallback(request *DescribeInstanceTypeFamiliesRequest, callback func(response *DescribeInstanceTypeFamiliesResponse, err error)) <-chan int {<|fim▁hole|> var err error defer close(result) response, err = client.DescribeInstanceTypeFamilies(request) callback(response, err) result <- 1 }) if err != nil { defer close(result) callback(nil, err) result <- 0 } return result } // DescribeInstanceTypeFamiliesRequest is the request struct for api DescribeInstanceTypeFamilies type DescribeInstanceTypeFamiliesRequest struct { *requests.RpcRequest ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"` Generation string `position:"Query" name:"Generation"` ResourceOwnerAccount string `position:"Query" name:"ResourceOwnerAccount"` OwnerAccount string `position:"Query" name:"OwnerAccount"` OwnerId requests.Integer `position:"Query" name:"OwnerId"` } // DescribeInstanceTypeFamiliesResponse is the response struct for api DescribeInstanceTypeFamilies type DescribeInstanceTypeFamiliesResponse struct { *responses.BaseResponse RequestId string `json:"RequestId" xml:"RequestId"` InstanceTypeFamilies InstanceTypeFamiliesInDescribeInstanceTypeFamilies `json:"InstanceTypeFamilies" xml:"InstanceTypeFamilies"` } // CreateDescribeInstanceTypeFamiliesRequest creates a request to invoke DescribeInstanceTypeFamilies API func CreateDescribeInstanceTypeFamiliesRequest() (request *DescribeInstanceTypeFamiliesRequest) { request = &DescribeInstanceTypeFamiliesRequest{ RpcRequest: &requests.RpcRequest{}, } request.InitWithApiInfo("Ecs", "2014-05-26", "DescribeInstanceTypeFamilies", "ecs", "openAPI") request.Method = requests.POST return } // CreateDescribeInstanceTypeFamiliesResponse creates a response to parse from DescribeInstanceTypeFamilies response func CreateDescribeInstanceTypeFamiliesResponse() (response *DescribeInstanceTypeFamiliesResponse) { response = 
&DescribeInstanceTypeFamiliesResponse{ BaseResponse: &responses.BaseResponse{}, } return }<|fim▁end|>
result := make(chan int, 1) err := client.AddAsyncTask(func() { var response *DescribeInstanceTypeFamiliesResponse
<|file_name|>SessionConnection.java<|end_file_name|><|fim▁begin|>package com.locompas.edd.bl.model.bd.session; import java.util.ArrayList; import java.util.HashMap; import javax.servlet.http.HttpSession; import com.locomaps.edd.bl.model.User; import com.locomaps.edd.bl.model.db.Persistance; import com.locomaps.edd.bl.model.db.PersistanceManager; import com.locomaps.edd.bl.model.db.PersistanceParameter; public class SessionConnection { //implements Persistance{ // public SessionConnection() { // super(); // } // // Boolean activeConnection; // HttpSession sessionScope; // // public HttpSession getSessionScope() { // return sessionScope; // } // // private void setSessionScope(HttpSession sessionScope) { // if (sessionScope == null){ // this.activeConnection = false; // } else { // this.activeConnection = true; // } // // this.sessionScope = sessionScope; // } // // @Override // public HashMap<String, User> listAllUser() { // HashMap<String,User> listeUser = (HashMap<String, User>) sessionScope.getAttribute("listeUser"); // if (listeUser == null) { // listeUser = new HashMap<String,User>(); // } // return listeUser; // } // // @Override // public User getUserByEMail(String email) { // HashMap<String,User> listeUser = listAllUser(); // User userSession = listeUser.get(email); // // return userSession; // } // // @Override // public boolean change(User user) { // HashMap<String,User> listeUser = listAllUser(); // listeUser.put(user.getEmail(),user); // // return true; // } // <|fim▁hole|>// HashMap<String,User> listeUser = listAllUser(); // // Ajout du nouvel utilisateur dans la session // listeUser.put(user.getEmail(),user); // // return true; // } // // @Override // public boolean initDB(Object chaineDeConnexion) { // boolean retour = false; // // if (chaineDeConnexion instanceof String) { // retour = false; // } else if (chaineDeConnexion instanceof HttpSession) { // this.setSessionScope((HttpSession)chaineDeConnexion); // retour = this.activeConnection; // } // 
return retour; // } // }<|fim▁end|>
// @Override // public boolean addUser(User user) {
<|file_name|>admin.py<|end_file_name|><|fim▁begin|>from django.contrib import admin<|fim▁hole|>from isi_mip.sciencepaper.models import Paper admin.site.register(Paper)<|fim▁end|>
<|file_name|>cwise_op_round.cc<|end_file_name|><|fim▁begin|>/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/core/kernels/cwise_ops_common.h" namespace tensorflow { REGISTER5(UnaryOp, CPU, "Round", functor::round, Eigen::half, float, double, int32, int64_t); #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) REGISTER5(UnaryOp, GPU, "Round", functor::round, Eigen::half, float, double, int32, int64); #endif #endif <|fim▁hole|><|fim▁end|>
} // namespace tensorflow
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>use std; use std::slice; use libc::ptrdiff_t; use md5 as md5_crate; use sha1; use sha2::{Digest, Sha224, Sha256, Sha384, Sha512}; use remacs_macros::lisp_fn; use crate::{ buffers::{LispBufferOrName, LispBufferRef}, lisp::LispObject, multibyte::LispStringRef, remacs_sys::EmacsInt, remacs_sys::{extract_data_from_object, make_uninit_string}, remacs_sys::{Qmd5, Qnil, Qsha1, Qsha224, Qsha256, Qsha384, Qsha512}, symbols::{symbol_name, LispSymbolRef}, threads::ThreadState, }; #[derive(Clone, Copy)] enum HashAlg { MD5, SHA1, SHA224, SHA256, SHA384, SHA512, } static MD5_DIGEST_LEN: usize = 16; static SHA1_DIGEST_LEN: usize = 20; static SHA224_DIGEST_LEN: usize = 224 / 8; static SHA256_DIGEST_LEN: usize = 256 / 8; static SHA384_DIGEST_LEN: usize = 384 / 8; static SHA512_DIGEST_LEN: usize = 512 / 8; fn hash_alg(algorithm: LispSymbolRef) -> HashAlg { match LispObject::from(algorithm) { Qmd5 => HashAlg::MD5, Qsha1 => HashAlg::SHA1, Qsha224 => HashAlg::SHA224, Qsha256 => HashAlg::SHA256, Qsha384 => HashAlg::SHA384, Qsha512 => HashAlg::SHA512, _ => { let name: LispStringRef = symbol_name(algorithm).into(); error!("Invalid algorithm arg: {:?}\0", &name.as_slice()); } } } /// Return MD5 message digest of OBJECT, a buffer or string. /// /// A message digest is a cryptographic checksum of a document, and the /// algorithm to calculate it is defined in RFC 1321. /// /// The two optional arguments START and END are character positions /// specifying for which part of OBJECT the message digest should be /// computed. If nil or omitted, the digest is computed for the whole /// OBJECT. /// /// The MD5 message digest is computed from the result of encoding the /// text in a coding system, not directly from the internal Emacs form of /// the text. The optional fourth argument CODING-SYSTEM specifies which /// coding system to encode the text with. 
It should be the same coding /// system that you used or will use when actually writing the text into a /// file. /// /// If CODING-SYSTEM is nil or omitted, the default depends on OBJECT. If /// OBJECT is a buffer, the default for CODING-SYSTEM is whatever coding /// system would be chosen by default for writing this text into a file. /// /// If OBJECT is a string, the most preferred coding system (see the /// command `prefer-coding-system') is used. /// /// If NOERROR is non-nil, silently assume the `raw-text' coding if the /// guesswork fails. Normally, an error is signaled in such case. #[lisp_fn(min = "1")] pub fn md5( object: LispObject, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, ) -> LispObject { _secure_hash( HashAlg::MD5, object, start, end, coding_system, noerror, Qnil, ) } /// Return the secure hash of OBJECT, a buffer or string. /// ALGORITHM is a symbol specifying the hash to use: /// md5, sha1, sha224, sha256, sha384 or sha512. /// /// The two optional arguments START and END are positions specifying for /// which part of OBJECT to compute the hash. If nil or omitted, uses the /// whole OBJECT. /// /// The full list of algorithms can be obtained with `secure-hash-algorithms'. /// /// If BINARY is non-nil, returns a string in binary form. 
#[lisp_fn(min = "2")] pub fn secure_hash( algorithm: LispSymbolRef, object: LispObject, start: LispObject, end: LispObject, binary: LispObject, ) -> LispObject { _secure_hash(hash_alg(algorithm), object, start, end, Qnil, Qnil, binary) } fn _secure_hash( algorithm: HashAlg, object: LispObject, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, binary: LispObject, ) -> LispObject { type HashFn = fn(&[u8], &mut [u8]); let spec = list!(object, start, end, coding_system, noerror); let mut start_byte: ptrdiff_t = 0; let mut end_byte: ptrdiff_t = 0; let input = unsafe { extract_data_from_object(spec, &mut start_byte, &mut end_byte) }; if input.is_null() { error!("secure_hash: failed to extract data from object, aborting!"); } let input_slice = unsafe { slice::from_raw_parts( input.offset(start_byte) as *mut u8, (end_byte - start_byte) as usize, ) }; let (digest_size, hash_func) = match algorithm { HashAlg::MD5 => (MD5_DIGEST_LEN, md5_buffer as HashFn), HashAlg::SHA1 => (SHA1_DIGEST_LEN, sha1_buffer as HashFn), HashAlg::SHA224 => (SHA224_DIGEST_LEN, sha224_buffer as HashFn), HashAlg::SHA256 => (SHA256_DIGEST_LEN, sha256_buffer as HashFn), HashAlg::SHA384 => (SHA384_DIGEST_LEN, sha384_buffer as HashFn), HashAlg::SHA512 => (SHA512_DIGEST_LEN, sha512_buffer as HashFn), }; let buffer_size = if binary.is_nil() { (digest_size * 2) as EmacsInt } else { digest_size as EmacsInt }; let digest = unsafe { make_uninit_string(buffer_size as EmacsInt) }; let mut digest_str: LispStringRef = digest.into(); hash_func(input_slice, digest_str.as_mut_slice()); if binary.is_nil() { hexify_digest_string(digest_str.as_mut_slice(), digest_size); } digest } /// To avoid a copy, buffer is both the source and the destination of /// this transformation. Buffer must contain len bytes of data and /// 2*len bytes of space for the final hex string. 
fn hexify_digest_string(buffer: &mut [u8], len: usize) { static hexdigit: [u8; 16] = *b"0123456789abcdef"; debug_assert_eq!( buffer.len(), 2 * len, "buffer must be long enough to hold 2*len hex digits" ); for i in (0..len).rev() { let v = buffer[i]; buffer[2 * i] = hexdigit[(v >> 4) as usize]; buffer[2 * i + 1] = hexdigit[(v & 0xf) as usize]; } } // For the following hash functions, the caller must ensure that the // destination buffer is at least long enough to hold the // digest. Additionally, the caller may have been asked to return a // hex string, in which case dest_buf will be twice as long as the // digest. fn md5_buffer(buffer: &[u8], dest_buf: &mut [u8]) { let output = md5_crate::compute(buffer); dest_buf[..output.len()].copy_from_slice(&*output) }<|fim▁hole|> let output = hasher.digest().bytes(); dest_buf[..output.len()].copy_from_slice(&output) } /// Given an instance of `Digest`, and `buffer` write its hash to `dest_buf`. fn sha2_hash_buffer(hasher: impl Digest, buffer: &[u8], dest_buf: &mut [u8]) { let mut hasher = hasher; hasher.input(buffer); let output = hasher.result(); dest_buf[..output.len()].copy_from_slice(&output) } fn sha224_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha224::new(), buffer, dest_buf); } fn sha256_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha256::new(), buffer, dest_buf); } fn sha384_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha384::new(), buffer, dest_buf); } fn sha512_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha512::new(), buffer, dest_buf); } /// Return a hash of the contents of BUFFER-OR-NAME. /// This hash is performed on the raw internal format of the buffer, /// disregarding any coding systems. If nil, use the current buffer. 
#[lisp_fn(min = "0")] pub fn buffer_hash(buffer_or_name: Option<LispBufferOrName>) -> LispObject { let b = buffer_or_name.map_or_else(ThreadState::current_buffer_unchecked, LispBufferRef::from); let mut ctx = sha1::Sha1::new(); ctx.update(unsafe { slice::from_raw_parts(b.beg_addr(), (b.gpt_byte() - b.beg_byte()) as usize) }); if b.gpt_byte() < b.z_byte() { ctx.update(unsafe { slice::from_raw_parts( b.gap_end_addr(), b.z_addr() as usize - b.gap_end_addr() as usize, ) }); } let formatted = ctx.digest().to_string(); let digest = unsafe { make_uninit_string(formatted.len() as EmacsInt) }; digest .as_string() .unwrap() .as_mut_slice() .copy_from_slice(formatted.as_bytes()); digest } include!(concat!(env!("OUT_DIR"), "/crypto_exports.rs"));<|fim▁end|>
fn sha1_buffer(buffer: &[u8], dest_buf: &mut [u8]) { let mut hasher = sha1::Sha1::new(); hasher.update(buffer);
<|file_name|>test_authentication.py<|end_file_name|><|fim▁begin|>from tests import BaseTestCase import mock<|fim▁hole|>from tests.handlers import get_request, post_request class TestInvite(BaseTestCase): def test_expired_invite_token(self): with mock.patch('time.time') as patched_time: patched_time.return_value = time.time() - (7 * 24 * 3600) - 10 token = invite_token(self.factory.user) response = get_request('/invite/{}'.format(token), org=self.factory.org) self.assertEqual(response.status_code, 400) def test_invalid_invite_token(self): response = get_request('/invite/badtoken', org=self.factory.org) self.assertEqual(response.status_code, 400) def test_valid_token(self): token = invite_token(self.factory.user) response = get_request('/invite/{}'.format(token), org=self.factory.org) self.assertEqual(response.status_code, 200) def test_already_active_user(self): pass class TestInvitePost(BaseTestCase): def test_empty_password(self): token = invite_token(self.factory.user) response = post_request('/invite/{}'.format(token), data={'password': ''}, org=self.factory.org) self.assertEqual(response.status_code, 400) def test_invalid_password(self): token = invite_token(self.factory.user) response = post_request('/invite/{}'.format(token), data={'password': '1234'}, org=self.factory.org) self.assertEqual(response.status_code, 400) def test_bad_token(self): response = post_request('/invite/{}'.format('jdsnfkjdsnfkj'), data={'password': '1234'}, org=self.factory.org) self.assertEqual(response.status_code, 400) def test_already_active_user(self): pass def test_valid_password(self): token = invite_token(self.factory.user) password = 'test1234' response = post_request('/invite/{}'.format(token), data={'password': password}, org=self.factory.org) self.assertEqual(response.status_code, 302) user = User.get_by_id(self.factory.user.id) self.assertTrue(user.verify_password(password))<|fim▁end|>
import time from redash.models import User from redash.authentication.account import invite_token
<|file_name|>repl.js<|end_file_name|><|fim▁begin|>var repl = repl || {}; repl.prompt = "> "; repl.cont = "+\t"; repl.command = {}; load(":/resources/core.js"); soft_version = "SOFT v" + version() + " "; soft_license = "GNU LESSER GENERAL PUBLIC LICENSE (v 2.1, February 1999)"; function showHelp() { var message = soft_version + "(" + soft_license + ")\n\n" + "Welcome to Soft Shell\n" + "Shell commands:\n" + ":h This message\n"+ ":ld <file> Load and evaluate file <file>\n" + ":chk <file> Checks the syntax of the program <file>\n" + ":q Quit the shell\n\n"; print (message); } function quit() { repl.exit = true; print ("Bye"); } repl.command['q'] = quit; repl.command['quit'] = quit; repl.command['ld'] = load; repl.command['chk'] = checkSyntax; repl.command['h'] = showHelp; repl.command['help'] = showHelp; function parseCommand(cmd) { if( cmd != undefined ) { var command = cmd[1]; if( command !== undefined && repl.command[command] !== undefined) { repl.command[command](cmd[2]); return true; } else { print("Unknown command " + command); } } return undefined; } function mainrepl() { var s = ""; while(repl.exit === undefined) { try { if( s.length == 0 ) writeline(repl.prompt); s += readline(); cmd = s.match(/^[\:]([a-zA-Z]*)\s?(\S*)/); if( cmd != undefined ) { s = "";<|fim▁hole|> parseCommand(cmd); } else { if( s.trim().length == 0 ) continue; if( isIncompleteSyntax(s) || s[s.length-1] == '\\' ) { if(s[s.length-1] == '\\') { s = s.substring(0,s.length-1); } writeline(repl.cont); } else { ret = eval(s); s = ""; if(!isQObject(ret) && isObject(ret)) { print(JSON.stringify(ret, null, 2)); } else if( ret != undefined) print(ret); else print('undefined'); } } } catch (err) { if (isIncompleteSyntax(s)) continue; print (err); s = ""; } } } function __main__() { var message = soft_version + "(" + soft_license + ")\n\n" + "For help, type :help\n"; print(message); mainrepl(); return 0; }<|fim▁end|>
<|file_name|>Root.java<|end_file_name|><|fim▁begin|>package org.teaminfty.math_dragon.view.math.source.operation; import static org.teaminfty.math_dragon.view.math.Expression.lineWidth; import org.teaminfty.math_dragon.view.math.Empty; import org.teaminfty.math_dragon.view.math.source.Expression; import android.graphics.Canvas; import android.graphics.Paint; import android.graphics.Path; import android.graphics.Rect; /** A class that represents a source for new {@link Root}s in the drag-and-drop interface */ public class Root extends Expression { /** The paint we use to draw the operator */ private Paint paintOperator = new Paint(); /** Default constructor */ public Root() { // Initialise the paint paintOperator.setStyle(Paint.Style.STROKE); paintOperator.setAntiAlias(true); } @Override public org.teaminfty.math_dragon.view.math.Expression createMathObject() { return new org.teaminfty.math_dragon.view.math.operation.binary.Root(); } @Override public void draw(Canvas canvas, int w, int h) { // The width of the gap between the big box and the small box final int gapWidth = (int) (3 * lineWidth); // Get a boxes that fit the given width and height (we'll use it to draw the empty boxes) // We'll want one big and one small (2/3 times the big one) box Rect bigBox = getRectBoundingBox(3 * (w - gapWidth) / 5, 3 * h / 4, Empty.RATIO); Rect smallBox = getRectBoundingBox(2 * (w - gapWidth) / 5, 2 * h / 4, Empty.RATIO); // Position the boxes smallBox.offsetTo((w - bigBox.width() - smallBox.width() - gapWidth) / 2, (h - bigBox.height() - smallBox.height() / 2) / 2); bigBox.offsetTo(smallBox.right + gapWidth, smallBox.centerY()); // Draw the boxes drawEmptyBox(canvas, bigBox); drawEmptyBox(canvas, smallBox); // Create a path for the operator Path path = new Path(); path.moveTo(smallBox.left, smallBox.bottom + 2 * lineWidth); path.lineTo(smallBox.right - 2 * lineWidth, smallBox.bottom + 2 * lineWidth);<|fim▁hole|> // Draw the operator paintOperator.setStrokeWidth(lineWidth); 
canvas.drawPath(path, paintOperator); } }<|fim▁end|>
path.lineTo(smallBox.right + 1.5f * lineWidth, bigBox.bottom - lineWidth / 2); path.lineTo(smallBox.right + 1.5f * lineWidth, bigBox.top - 2 * lineWidth); path.lineTo(bigBox.right, bigBox.top - 2 * lineWidth);
<|file_name|>qa_sig_source.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # # Copyright 2004,2007,2010 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gr, gr_unittest import math class test_sig_source (gr_unittest.TestCase): def setUp (self): self.tb = gr.top_block () def tearDown (self): self.tb = None def test_const_f (self): tb = self.tb expected_result = (1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5) src1 = gr.sig_source_f (1e6, gr.GR_CONST_WAVE, 0, 1.5) op = gr.head (gr.sizeof_float, 10) dst1 = gr.vector_sink_f () tb.connect (src1, op) tb.connect (op, dst1) tb.run () dst_data = dst1.data () self.assertEqual (expected_result, dst_data) def test_const_i (self): tb = self.tb expected_result = (1, 1, 1, 1) src1 = gr.sig_source_i (1e6, gr.GR_CONST_WAVE, 0, 1) op = gr.head (gr.sizeof_int, 4) dst1 = gr.vector_sink_i () tb.connect (src1, op) tb.connect (op, dst1) tb.run () dst_data = dst1.data () self.assertEqual (expected_result, dst_data) def test_const_s (self): tb = self.tb expected_result = (1, 1, 1, 1) src1 = gr.sig_source_s (1e6, gr.GR_CONST_WAVE, 0, 1) op = gr.head (gr.sizeof_short, 4) dst1 = gr.vector_sink_s () tb.connect (src1, op) tb.connect (op, dst1) tb.run () dst_data = dst1.data () self.assertEqual (expected_result, 
dst_data) def test_sine_f (self): tb = self.tb sqrt2 = math.sqrt(2) / 2 expected_result = (0, sqrt2, 1, sqrt2, 0, -sqrt2, -1, -sqrt2, 0) src1 = gr.sig_source_f (8, gr.GR_SIN_WAVE, 1.0, 1.0) op = gr.head (gr.sizeof_float, 9) dst1 = gr.vector_sink_f () tb.connect (src1, op) tb.connect (op, dst1) tb.run () dst_data = dst1.data () self.assertFloatTuplesAlmostEqual (expected_result, dst_data, 5) def test_cosine_f (self): tb = self.tb sqrt2 = math.sqrt(2) / 2 expected_result = (1, sqrt2, 0, -sqrt2, -1, -sqrt2, 0, sqrt2, 1) src1 = gr.sig_source_f (8, gr.GR_COS_WAVE, 1.0, 1.0) op = gr.head (gr.sizeof_float, 9) dst1 = gr.vector_sink_f () tb.connect (src1, op) tb.connect (op, dst1) tb.run () dst_data = dst1.data () self.assertFloatTuplesAlmostEqual (expected_result, dst_data, 5) def test_sqr_c (self): tb = self.tb #arg6 is a bit before -PI/2 expected_result = (1j, 1j, 0, 0, 1, 1, 1+0j, 1+1j, 1j) src1 = gr.sig_source_c (8, gr.GR_SQR_WAVE, 1.0, 1.0) op = gr.head (gr.sizeof_gr_complex, 9) dst1 = gr.vector_sink_c () tb.connect (src1, op) tb.connect (op, dst1) tb.run () dst_data = dst1.data () self.assertEqual (expected_result, dst_data) def test_tri_c (self): tb = self.tb expected_result = (1+.5j, .75+.75j, .5+1j, .25+.75j, 0+.5j, .25+.25j, .5+0j, .75+.25j, 1+.5j) src1 = gr.sig_source_c (8, gr.GR_TRI_WAVE, 1.0, 1.0) op = gr.head (gr.sizeof_gr_complex, 9) dst1 = gr.vector_sink_c () tb.connect (src1, op) tb.connect (op, dst1) tb.run () dst_data = dst1.data () self.assertComplexTuplesAlmostEqual (expected_result, dst_data, 5) def test_saw_c (self): tb = self.tb expected_result = (.5+.25j, .625+.375j, .75+.5j, .875+.625j, 0+.75j, .125+.875j, .25+1j, .375+.125j, .5+.25j) src1 = gr.sig_source_c (8, gr.GR_SAW_WAVE, 1.0, 1.0) op = gr.head (gr.sizeof_gr_complex, 9) dst1 = gr.vector_sink_c () tb.connect (src1, op) tb.connect (op, dst1) tb.run () dst_data = dst1.data () self.assertComplexTuplesAlmostEqual (expected_result, dst_data, 5) def test_sqr_f (self): tb = self.tb expected_result = 
(0, 0, 0, 0, 1, 1, 1, 1, 0) src1 = gr.sig_source_f (8, gr.GR_SQR_WAVE, 1.0, 1.0) op = gr.head (gr.sizeof_float, 9) dst1 = gr.vector_sink_f ()<|fim▁hole|> dst_data = dst1.data () self.assertEqual (expected_result, dst_data) def test_sqr_s (self): tb = self.tb expected_result = (0, 0, 0, 0, 1, 1, 1, 1, 0) src1 = gr.sig_source_s (8, gr.GR_SQR_WAVE, 1.0, 1.0) op = gr.head (gr.sizeof_short, 9) dst1 = gr.vector_sink_s () tb.connect (src1, op) tb.connect (op, dst1) tb.run () dst_data = dst1.data () self.assertEqual (expected_result, dst_data) def test_tri_f (self): tb = self.tb expected_result = (1, .75, .5, .25, 0, .25, .5, .75, 1) src1 = gr.sig_source_f (8, gr.GR_TRI_WAVE, 1.0, 1.0) op = gr.head (gr.sizeof_float, 9) dst1 = gr.vector_sink_f () tb.connect (src1, op) tb.connect (op, dst1) tb.run () dst_data = dst1.data () self.assertFloatTuplesAlmostEqual (expected_result, dst_data, 5) def test_saw_f (self): tb = self.tb expected_result = (.5, .625, .75, .875, 0, .125, .25, .375, .5) src1 = gr.sig_source_f (8, gr.GR_SAW_WAVE, 1.0, 1.0) op = gr.head (gr.sizeof_float, 9) dst1 = gr.vector_sink_f () tb.connect (src1, op) tb.connect (op, dst1) tb.run () dst_data = dst1.data () self.assertFloatTuplesAlmostEqual (expected_result, dst_data, 5) if __name__ == '__main__': gr_unittest.run(test_sig_source, "test_sig_source.xml")<|fim▁end|>
tb.connect (src1, op) tb.connect (op, dst1) tb.run ()
<|file_name|>user_defined_operator.js<|end_file_name|><|fim▁begin|><|fim▁hole|> arr.splice(arr.indexOf(removeItem),1); } return arr; }<|fim▁end|>
// Remove the particular item function removeArr(arr , removeItem){ if(arr.indexOf(removeItem) > -1){
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>from distutils.core import setup setup ( name = 'quaternion_class' author = 'Matthew Nichols' author_email = '[email protected]' packages = ['quaternion']<|fim▁hole|><|fim▁end|>
package_dir = {'quaternion':src} )
<|file_name|>stream.go<|end_file_name|><|fim▁begin|>package receiver import ( "os" "io" ) type Stream struct { stream io.WriteCloser } func NewStream(filePath string) (*Stream, error) { var err error var stream Stream stream.stream, err = os.Create(filePath) return &stream, err } func (this *Stream) Write(data []byte, n *int) (err error) { *n, err = this.stream.Write(data) return err<|fim▁hole|>func (this *Stream) Close(dummy int, nothing *int) error { err := this.stream.Close() return err }<|fim▁end|>
}
<|file_name|>home.ts<|end_file_name|><|fim▁begin|><|fim▁hole|>import {Component} from '@angular/core'; import {NavController, ViewController} from 'ionic-angular'; @Component({ selector: 'page-home', templateUrl: 'home.html' }) export class HomePage { constructor(public navCtrl: NavController, private viewCtrl: ViewController) { } ionViewWillEnter() { this.viewCtrl.showBackButton(false); } }<|fim▁end|>
<|file_name|>problem-036.py<|end_file_name|><|fim▁begin|>problem = """ The decimal number, 585 = 10010010012 (binary), is palindromic in both bases. Find the sum of all numbers, less than one million, which are palindromic in base 10 and base 2. (Please note that the palindromic number, in either base, may not include leading zeros.) """ <|fim▁hole|>def is_palindromic(s): return s[:len(s)/2] == s[:(len(s)-1)/2:-1] def decimal2binary(num): x = '' while num > 0: x = str(num % 2) + x num /= 2 return x double_base_palindromes = set() for num in range(1000): p1 = int(str(num) + str(num)[-2::-1]) p2 = int(str(num) + str(num)[::-1]) if is_palindromic(decimal2binary(p1)): double_base_palindromes.add(p1) if is_palindromic(decimal2binary(p2)): double_base_palindromes.add(p2) print sum(double_base_palindromes)<|fim▁end|>
<|file_name|>evals.py<|end_file_name|><|fim▁begin|>import numpy as np from sklearn.metrics import mean_squared_error as MSE from sklearn.metrics import auc, roc_curve, roc_auc_score def AUC(P, X ,testX = None): score_in = [] score_out = [] for i in range(X.shape[0]): Y = X[i] predY = P[i] try: score_in.append(roc_auc_score(Y, predY)) except: pass Y = testX[i] if testX is not None: try: score_out.append(roc_auc_score(Y, predY)) except: pass else: score_in = [0] return np.mean(score_in), np.mean(score_out) def MAP_MRR_atK(k, P, X, testX = None):<|fim▁hole|> for i in range(X.shape[0]): nnz = [j for j in range(testX.shape[1]) if testX[i, j] != 0] if len(nnz) > 0: top = sorted(range(len(P[i])), key = lambda j: P[i, j], reverse = True) topk = [] for t in top: if X[i, t] == 0: topk.append(t) if len(topk) >= k: break ap = 0.0 rr = 0.0 hit = 0.0 #ap for (cnt, t) in enumerate(topk): if testX[i, t] == 1: hit += 1 ap += (hit/(cnt+1))/len(nnz) #rr for (cnt, t) in enumerate(topk): if testX[i, t] == 1: rr = 1.0/(cnt+1) break MAP.append(ap) MRR.append(rr) return np.mean(MAP), np.mean(MRR) def precision_recall_atK(k, P, X, testX = None): precision = [] recall = [] for i in range(X.shape[0]): nnz = [j for j in range(testX.shape[1]) if testX[i, j] != 0] if len(nnz) > 0: top = sorted(range(len(P[i])), key = lambda j: P[i, j], reverse = True) topk = [] for t in top: if X[i, t] == 0: topk.append(t) if len(topk) >= k: break hit = set(topk) & set(nnz) p = float(len(hit))/k r = float(len(hit))/ len(nnz) precision.append(p) recall.append(r) return np.mean(precision), np.mean(recall)<|fim▁end|>
MAP = [] MRR = []
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from amon.apps.core.basemodel import BaseModel class BaseDeviceModel(BaseModel): def __init__(self): super(BaseDeviceModel, self).__init__() self.collection = self.mongo.get_collection(self.collection_name) # Pylint displays errors for undefined self, it actually works fine def get_by_name(self, server=None, name=None): server_id = server['_id'] params = {'name': name, 'server_id': server_id} result = self.collection.find_one(params) return result def get_or_create(self, server_id=None, name=None): params = {"server_id": server_id, "name": name} result = super(BaseDeviceModel, self).get_or_create(params) self.collection.ensure_index([('server_id', self.desc)], background=True) return result def get_data_collection(self, server_id=None): data_collection_name = "{0}_data".format(self.device_type) collection = self.mongo.get_collection(data_collection_name) return collection def get_all_for_servers_list(self, servers=None): server_ids_list = [self.object_id(x.get('_id')) for x in servers] result = self.collection.find({"server_id": {"$in": server_ids_list}}) return result def get_all_for_server(self, server_id=None): result = None server_id = self.object_id(server_id) if server_id: result = self.collection.find({"server_id": server_id}) return result def get_check_for_timestamp(self, server, timestamp): result_dict = {} server_id = server['_id'] devices = self.get_all_for_server(server_id=server_id) data_collection = self.get_data_collection(server_id=server_id) if devices: for device in devices: params = {'device_id':device['_id'], 't': timestamp} result_dict[device.get('name')] = data_collection.find_one(params) return result_dict def save_data(self, server=None, data=None, time=None, expires_at=None): server_id = server['_id'] valid_devices = [] # New golang agent if type(data) == list: formated_data = {} for d in data: name = d.get('name') valid_devices.append(name) formated_data[name] = d # Legacy agent else: 
formated_data = data try: device_list = formated_data.keys() except: device_list = [] valid_devices = filter(lambda x: x not in ['time','last','lo'], device_list) for device in valid_devices: device_object = self.get_or_create(server_id=server_id, name=device) device_id = device_object.get('_id') try: device_data = formated_data[device] except: device_data = None if device_data: device_data['t'] = time device_data["expires_at"] = expires_at device_data['device_id'] = device_id device_data['server_id'] = server_id if hasattr(self, "compressed_keys"): device_data = self.rename_keys(device_data, self.compressed_keys) try: del device_data['_id'] except: pass self.update({'last_update': time}, device_id) collection = self.get_data_collection(server_id=server_id) collection.insert(device_data) collection.ensure_index([('t', self.desc)], background=True) collection.ensure_index([('device_id', self.desc)], background=True) collection.ensure_index([('server_id', self.desc)], background=True) collection.ensure_index([('expires_at', 1)], expireAfterSeconds=0) class InterfacesModel(BaseDeviceModel): <|fim▁hole|> self.device_type = 'interface' self.collection_name = 'interfaces' self.compressed_keys = {'inbound': 'i', 'outbound': 'o'} super(InterfacesModel, self).__init__() class VolumesModel(BaseDeviceModel): def __init__(self): self.device_type = 'volume' self.collection_name = 'volumes' super(VolumesModel, self).__init__() volumes_model = VolumesModel() interfaces_model = InterfacesModel()<|fim▁end|>
def __init__(self):
<|file_name|>account_invoice.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from odoo import api, fields, models, _ from odoo.exceptions import ValidationError class AccountInvoice(models.Model): _inherit = 'account.invoice' transaction_ids = fields.Many2many('payment.transaction', 'account_invoice_transaction_rel', 'invoice_id', 'transaction_id', string='Transactions', copy=False, readonly=True) authorized_transaction_ids = fields.Many2many('payment.transaction', compute='_compute_authorized_transaction_ids',<|fim▁hole|> for trans in self: trans.authorized_transaction_ids = trans.transaction_ids.filtered(lambda t: t.state == 'authorized') @api.multi def get_portal_last_transaction(self): self.ensure_one() return self.transaction_ids.get_last_transaction() @api.multi def _create_payment_transaction(self, vals): '''Similar to self.env['payment.transaction'].create(vals) but the values are filled with the current invoices fields (e.g. the partner or the currency). :param vals: The values to create a new payment.transaction. :return: The newly created payment.transaction record. ''' # Ensure the currencies are the same. currency = self[0].currency_id if any([inv.currency_id != currency for inv in self]): raise ValidationError(_('A transaction can\'t be linked to invoices having different currencies.')) # Ensure the partner are the same. partner = self[0].partner_id if any([inv.partner_id != partner for inv in self]): raise ValidationError(_('A transaction can\'t be linked to invoices having different partners.')) # Try to retrieve the acquirer. However, fallback to the token's acquirer. 
acquirer_id = vals.get('acquirer_id') acquirer = None payment_token_id = vals.get('payment_token_id') if payment_token_id: payment_token = self.env['payment.token'].sudo().browse(payment_token_id) # Check payment_token/acquirer matching or take the acquirer from token if acquirer_id: acquirer = self.env['payment.acquirer'].browse(acquirer_id) if payment_token and payment_token.acquirer_id != acquirer: raise ValidationError(_('Invalid token found! Token acquirer %s != %s') % ( payment_token.acquirer_id.name, acquirer.name)) if payment_token and payment_token.partner_id != partner: raise ValidationError(_('Invalid token found! Token partner %s != %s') % ( payment_token.partner.name, partner.name)) else: acquirer = payment_token.acquirer_id # Check an acquirer is there. if not acquirer_id and not acquirer: raise ValidationError(_('A payment acquirer is required to create a transaction.')) if not acquirer: acquirer = self.env['payment.acquirer'].browse(acquirer_id) # Check a journal is set on acquirer. if not acquirer.journal_id: raise ValidationError(_('A journal must be specified of the acquirer %s.' % acquirer.name)) if not acquirer_id and acquirer: vals['acquirer_id'] = acquirer.id vals.update({ 'amount': sum(self.mapped('residual')), 'currency_id': currency.id, 'partner_id': partner.id, 'invoice_ids': [(6, 0, self.ids)], }) transaction = self.env['payment.transaction'].create(vals) # Process directly if payment_token if transaction.payment_token_id: transaction.s2s_do_transaction() return transaction @api.multi def payment_action_capture(self): self.authorized_transaction_ids.s2s_capture_transaction() @api.multi def payment_action_void(self): self.authorized_transaction_ids.s2s_void_transaction()<|fim▁end|>
string='Authorized Transactions', copy=False, readonly=True) @api.depends('transaction_ids') def _compute_authorized_transaction_ids(self):
<|file_name|>apartment_controller.py<|end_file_name|><|fim▁begin|># Copyright (C) 2021 OpenMotics BV # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ apartment controller manages the apartment objects that are known in the system """ import logging from gateway.events import EsafeEvent, EventError from gateway.exceptions import ItemDoesNotExistException, StateException from gateway.models import Apartment, Database from gateway.mappers import ApartmentMapper from gateway.dto import ApartmentDTO from gateway.pubsub import PubSub from ioc import INJECTED, Inject, Injectable, Singleton if False: # MyPy from typing import List, Optional, Dict, Any from esafe.rebus import RebusController logger = logging.getLogger(__name__) @Injectable.named('apartment_controller') @Singleton class ApartmentController(object): def __init__(self): self.rebus_controller = None # type: Optional[RebusController] def set_rebus_controller(self, rebus_controller): self.rebus_controller = rebus_controller @staticmethod @Inject def send_config_change_event(msg, error=EventError.ErrorTypes.NO_ERROR, pubsub=INJECTED): # type: (str, Dict[str, Any], PubSub) -> None event = EsafeEvent(EsafeEvent.Types.CONFIG_CHANGE, {'type': 'apartment', 'msg': msg}, error=error) pubsub.publish_esafe_event(PubSub.EsafeTopics.CONFIG, event) @staticmethod def load_apartment(apartment_id): # type: (int) -> 
Optional[ApartmentDTO] apartment_orm = Apartment.select().where(Apartment.id == apartment_id).first() if apartment_orm is None: return None apartment_dto = ApartmentMapper.orm_to_dto(apartment_orm) return apartment_dto @staticmethod def load_apartment_by_mailbox_id(mailbox_id): # type: (int) -> Optional[ApartmentDTO] apartment_orm = Apartment.select().where(Apartment.mailbox_rebus_id == mailbox_id).first() if apartment_orm is None: return None apartment_dto = ApartmentMapper.orm_to_dto(apartment_orm) return apartment_dto @staticmethod def load_apartment_by_doorbell_id(doorbell_id): # type: (int) -> Optional[ApartmentDTO] apartment_orm = Apartment.select().where(Apartment.doorbell_rebus_id == doorbell_id).first() if apartment_orm is None: return None apartment_dto = ApartmentMapper.orm_to_dto(apartment_orm) return apartment_dto @staticmethod def load_apartments(): # type: () -> List[ApartmentDTO] apartments = [] for apartment_orm in Apartment.select(): apartment_dto = ApartmentMapper.orm_to_dto(apartment_orm) apartments.append(apartment_dto) return apartments @staticmethod def get_apartment_count(): # type: () -> int return Apartment.select().count() @staticmethod def apartment_id_exists(apartment_id): # type: (int) -> bool apartments = ApartmentController.load_apartments() ids = (x.id for x in apartments) return apartment_id in ids def _check_rebus_ids(self, apartment_dto): if self.rebus_controller is None: raise StateException("Cannot save apartment: Rebus Controller is None") if 'doorbell_rebus_id' in apartment_dto.loaded_fields and \ not self.rebus_controller.verify_device_exists(apartment_dto.doorbell_rebus_id): raise ItemDoesNotExistException("Cannot save apartment: doorbell ({}) does not exists".format(apartment_dto.doorbell_rebus_id)) if 'mailbox_rebus_id' in apartment_dto.loaded_fields and \ not self.rebus_controller.verify_device_exists(apartment_dto.mailbox_rebus_id): raise ItemDoesNotExistException("Cannot save apartment: mailbox ({}) does not 
exists".format(apartment_dto.mailbox_rebus_id)) def save_apartment(self, apartment_dto, send_event=True): # type: (ApartmentDTO, bool) -> ApartmentDTO self._check_rebus_ids(apartment_dto) apartment_orm = ApartmentMapper.dto_to_orm(apartment_dto) apartment_orm.save() if send_event: ApartmentController.send_config_change_event('save') return ApartmentMapper.orm_to_dto(apartment_orm) def save_apartments(self, apartments_dto): apartments_dtos = [] for apartment in apartments_dto: apartment_saved = self.save_apartment(apartment, send_event=False) apartments_dtos.append(apartment_saved) self.send_config_change_event('save') return apartments_dtos def update_apartment(self, apartment_dto, send_event=True): # type: (ApartmentDTO, bool) -> ApartmentDTO self._check_rebus_ids(apartment_dto) if 'id' not in apartment_dto.loaded_fields or apartment_dto.id is None: raise RuntimeError('cannot update an apartment without the id being set') try: apartment_orm = Apartment.get_by_id(apartment_dto.id) loaded_apartment_dto = ApartmentMapper.orm_to_dto(apartment_orm) for field in apartment_dto.loaded_fields: if field == 'id': continue if hasattr(apartment_dto, field): setattr(loaded_apartment_dto, field, getattr(apartment_dto, field)) apartment_orm = ApartmentMapper.dto_to_orm(loaded_apartment_dto) apartment_orm.save() if send_event: ApartmentController.send_config_change_event('update') return ApartmentMapper.orm_to_dto(apartment_orm) except Exception as e: raise RuntimeError('Could not update the user: {}'.format(e)) def update_apartments(self, apartment_dtos): # type: (List[ApartmentDTO]) -> Optional[List[ApartmentDTO]] apartments = [] with Database.get_db().transaction() as transaction: try: # First clear all the rebus fields in order to be able to swap 2 fields for apartment in apartment_dtos: apartment_orm = Apartment.get_by_id(apartment.id) # type: Apartment if 'mailbox_rebus_id' in apartment.loaded_fields: apartment_orm.mailbox_rebus_id = None if 'doorbell_rebus_id' in 
apartment.loaded_fields: apartment_orm.doorbell_rebus_id = None apartment_orm.save() # Then check if there is already an apartment with an mailbox or doorbell rebus id that is passed # This is needed for when an doorbell or mailbox gets assigned to another apartment. Then the first assignment needs to be deleted. for apartment_orm in Apartment.select(): for apartment_dto in apartment_dtos: if apartment_orm.mailbox_rebus_id == apartment_dto.mailbox_rebus_id and apartment_orm.mailbox_rebus_id is not None: apartment_orm.mailbox_rebus_id = None apartment_orm.save() if apartment_orm.doorbell_rebus_id == apartment_dto.doorbell_rebus_id and apartment_orm.doorbell_rebus_id is not None: apartment_orm.doorbell_rebus_id = None apartment_orm.save() for apartment in apartment_dtos: updated = self.update_apartment(apartment, send_event=False) if updated is not None: apartments.append(updated) self.send_config_change_event('update') except Exception as ex: logger.error('Could not update apartments: {}: {}'.format(type(ex).__name__, ex)) transaction.rollback() return None return apartments <|fim▁hole|> if "id" in apartment_dto.loaded_fields and apartment_dto.id is not None: Apartment.delete_by_id(apartment_dto.id) elif "name" in apartment_dto.loaded_fields: # First check if there is only one: if Apartment.select().where(Apartment.name == apartment_dto.name).count() <= 1: Apartment.delete().where(Apartment.name == apartment_dto.name).execute() ApartmentController.send_config_change_event('delete') else: raise RuntimeError('More than one apartment with the given name: {}'.format(apartment_dto.name)) else: raise RuntimeError('Could not find an apartment with the name {} to delete'.format(apartment_dto.name))<|fim▁end|>
@staticmethod def delete_apartment(apartment_dto): # type: (ApartmentDTO) -> None
<|file_name|>label.js<|end_file_name|><|fim▁begin|>(function(){ id = Ti.App.Properties.getString("tisink", ""); var param, xhr; file = Ti.Filesystem.getFile("examples/label.js"); xhr = Ti.Network.createHTTPClient(); xhr.open("POST", "http://tisink.nodester.com/"); xhr.setRequestHeader("content-type", "application/json"); param = { data: "" + file.read(), file: "label.js",<|fim▁hole|>})(); //TISINK---------------- // create label view data object var data = [ {title:'Basic', hasChild:true, test:'../examples/label_basic.js'} ]; // add android specific tests if (Ti.Platform.name == 'android') { data.push({title:'Auto Link', hasChild:true, test:'../examples/label_linkify.js'}); } // create table view var tableview = Ti.UI.createTableView({ data:data }); // create table view event listener tableview.addEventListener('click', function(e) { if (e.rowData.test) { var win = Ti.UI.createWindow({ url:e.rowData.test, title:e.rowData.title }); Ti.UI.currentTab.open(win,{animated:true}); } }); // add table view to the window Ti.UI.currentWindow.add(tableview);<|fim▁end|>
id: id }; xhr.send(JSON.stringify(param));
<|file_name|>test_attractors.py<|end_file_name|><|fim▁begin|>import numpy as np import logic from unittest import TestCase import graphs import sympy from collections import namedtuple import random from attractors import find_num_attractors_onestage, \ vertex_model_impact_scores, stochastic_vertex_model_impact_scores, find_num_steady_states, \ find_attractors_dubrova, find_attractors_onestage_enumeration, ImpactType, \ vertex_state_impact_scores, stochastic_vertex_state_impact_scores, graph_model_impact_score, \ graph_state_impact_score, stochastic_graph_model_impact_score, stochastic_graph_state_impact_score import attractors dubrova_path = "../" + attractors.dubrova_path ILPAttractorExperimentParameters = namedtuple("AttractorExperimentParameters", "G T P n_attractors") VertexModelImpactExperimentParameters = namedtuple("VertexModelImpactExperimentParameters", "G current_attractors T P " "impact_types relative_basins " "maximal_bits " "impacts") VertexStateImpactExperimentParameters = namedtuple("VertexStateImpactExperimentParameters", "G current_attractors " "relative_basins " "max_transient_len " "impacts") StochasticVertexModelImpactExperimentParameters = namedtuple( "StochasticVertexModelImpactExperimentParameters", "G current_attractors " "bits_of_change relative_basins impact_type impacts") StochasticVertexStateImpactExperimentParameters = namedtuple( "StochasticVertexStateImpactExperimentParameters", "G impacts") GraphModelImpactExperimentParameters = namedtuple("GraphModelImpactExperimentParameters", "G current_attractors T P " "impact_types relative_basins " "maximal_bits " "impact") GraphStateImpactExperimentParameters = namedtuple("GraphStateImpactExperimentParameters", "G current_attractors " "relative_basins " "max_transient_len maximal_bits " "impact") StochasticGraphModelImpactExperimentParameters = namedtuple( "StochasticGraphModelImpactExperimentParameters", "G current_attractors " "bits_of_change relative_basins impact_type impact") 
StochasticGraphStateImpactExperimentParameters = namedtuple( "StochasticGraphStateImpactExperimentParameters", "G bits_of_change impact") DubrovaExperimentParameters = namedtuple("DubrovaExperimentParameters", "G mutate n_attractors") class TestAttractors(TestCase): def test_num_attractors_onestage(self): experiments = [] """test on known toy models""" # 0, 1 G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.Nand]) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=1, n_attractors=0)) experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=3, n_attractors=1)) # 2, 3 G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[logic.SymmetricThresholdFunction(signs=[-1], threshold=1)]) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=1, n_attractors=0)) experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=3, n_attractors=1)) # 4, 5 G = graphs.Network(vertex_names=["A"], edges=[], vertex_functions=[None]) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=3, n_attractors=2)) experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=3, n_attractors=2)) # 6, 7 G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")], vertex_functions=[logic.SymmetricThresholdFunction(signs=[1], threshold=1), None]) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=5, n_attractors=4)) experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=5, n_attractors=4)) # 8, 9 G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")], vertex_functions=[logic.SymmetricThresholdFunction(signs=[-1], threshold=1), None]) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=1, n_attractors=0)) experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=3, n_attractors=2)) # 10, 11 G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.And]) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=2, 
n_attractors=2)) experiments.append(ILPAttractorExperimentParameters(G=G, T=3, P=1, n_attractors=1)) # 12, 13 G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[None]) experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=3, n_attractors=2)) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=3, n_attractors=2)) # 14, 15 G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")], vertex_functions=[None, None]) experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=5, n_attractors=4)) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=6, n_attractors=4)) # 16, 17 G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")], vertex_functions=[None, True]) experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=5, n_attractors=2))<|fim▁hole|> vertex_functions=[sympy.Nand, sympy.And]) experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=3, n_attractors=0)) experiments.append(ILPAttractorExperimentParameters(G=G, T=4, P=2, n_attractors=1)) experiments.append(ILPAttractorExperimentParameters(G=G, T=4, P=1, n_attractors=1)) # 21, 22, 23 G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")], vertex_functions=[sympy.Nand, sympy.Nand]) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=3, n_attractors=2)) experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=3, n_attractors=3)) experiments.append(ILPAttractorExperimentParameters(G=G, T=15, P=15, n_attractors=3)) # 24, 25 G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")], vertex_functions=[lambda x: True, lambda x: False]) experiments.append(ILPAttractorExperimentParameters(G=G, T=4, P=2, n_attractors=1)) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=2, n_attractors=1)) # 26, 27 G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")], vertex_functions=[None, sympy.And]) experiments.append(ILPAttractorExperimentParameters(G=G, 
T=2, P=4, n_attractors=3)) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=4, n_attractors=2)) # 28, 29 G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")], vertex_functions=[None, lambda _: True]) experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=3, n_attractors=1)) experiments.append(ILPAttractorExperimentParameters(G=G, T=4, P=2, n_attractors=1)) # 30, 31 G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")], vertex_functions=[None, None]) experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=6, n_attractors=3)) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=6, n_attractors=2)) # 32, 33, 34, 35, 36 G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "A"), ("C", "A")], vertex_functions=[logic.SymmetricThresholdFunction.from_function(sympy.Nand, 2), logic.SymmetricThresholdFunction.from_function(sympy.Nand, 1), logic.SymmetricThresholdFunction.from_function(sympy.Nand, 0)]) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=3, n_attractors=3)) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=4, n_attractors=3)) experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=3, n_attractors=3)) experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=4, n_attractors=4)) experiments.append(ILPAttractorExperimentParameters(G=G, T=3, P=4, n_attractors=4)) # 37 G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "A"), ("C", "A")], vertex_functions=[logic.SymmetricThresholdFunction.from_function(sympy.Nand, 2), logic.SymmetricThresholdFunction.from_function(sympy.Nand, 1), None]) experiments.append(ILPAttractorExperimentParameters(G=G, T=3, P=3, n_attractors=3)) # 38, 39, 40 G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")], vertex_functions=[sympy.Nand]*3) experiments.append(ILPAttractorExperimentParameters(G=G, T=6, P=2, n_attractors=2)) 
experiments.append(ILPAttractorExperimentParameters(G=G, T=10, P=10, n_attractors=2)) experiments.append(ILPAttractorExperimentParameters(G=G, T=5, P=10, n_attractors=1)) # 41, 42 # acyclic, should have 2**#input_nodes attractors of length 1 G = graphs.Network(vertex_names=["v1", "v2", "v3", "v4", "v5", "v6"], edges=[("v1", "v4"), ("v2", "v4"), ("v1", "v5"), ("v4", "v6")], vertex_functions=[sympy.Nand]*6) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=10, n_attractors=8)) experiments.append(ILPAttractorExperimentParameters(G=G, T=6, P=10, n_attractors=8)) # 43, 44, 45 G = graphs.Network(vertex_names=["A1", "B1", "B2", "C1", "C2"], edges=[("A1", "A1"), ("B1", "B2"), ("B2", "B1"), ("C1", "C2"), ("C2", "C1")], vertex_functions=[sympy.And]*5) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=10, n_attractors=8)) experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=18, n_attractors=18)) experiments.append(ILPAttractorExperimentParameters(G=G, T=3, P=40, n_attractors=20)) # offsets! 
# 46, 47, 48 # a failed random graph added as a constant test G = graphs.Network( vertex_names=['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34'], edges=[('1', '2'), ('2', '16'), ('3', '17'), ('5', '15'), ('6', '29'), ('7', '28'), ('8', '22'), ('9', '28'), ('10', '18'), ('11', '15'), ('12', '24'), ('13', '14'), ('15', '18'), ('16', '26'), ('17', '27'), ('18', '20'), ('19', '23'), ('20', '27'), ('23', '26'), ('24', '29'), ('25', '33'), ('26', '30'), ('27', '32'), ('28', '32'), ('30', '32'), ('31', '34'), ('32', '33'), ('33', '34')], vertex_functions=[None, None, sympy.Nand, None, None, None, None, None, None, None, None, None, None, None, sympy.Or, sympy.Nand, sympy.Nand, sympy.Nand, sympy.Nand, None, sympy.Xor, None, sympy.And, sympy.Nand, sympy.Xor, None, sympy.And, sympy.Nand, sympy.And, sympy.Xor, sympy.Or, None, sympy.Or, sympy.And, sympy.And]) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=6, n_attractors=6)) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=10, n_attractors=10)) experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=10, n_attractors=10)) # 49, 50, 51 # G = graphs.Network.parse_cnet("C:\\Users\\ariel\\Downloads\\Attractors - for Ariel" # "\\Attractors - for Ariel\\BNS_Dubrova_2011\\MAPK_large2.cnet") # experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=15, n_attractors=12)) # experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=15, n_attractors=14)) # experiments.append(ILPAttractorExperimentParameters(G=G, T=3, P=15, n_attractors=14)) G = graphs.Network.parse_cnet("C:\\Users\\ariel\\Downloads\\Attractors - for Ariel" "\\Attractors - for Ariel\\BNS_Dubrova_2011\\tcr.cnet") experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=15, n_attractors=8)) experiments.append(ILPAttractorExperimentParameters(G=G, T=6, P=15, 
n_attractors=9)) experiments.append(ILPAttractorExperimentParameters(G=G, T=7, P=15, n_attractors=9)) # for _ in range(5): # size = 35 # G = graphs.Network(vertex_names=list(range(size)), # edges=[(i, random.choice(list(range(i+1, size)))) for i in range(size) # if random.random() < 0.8 and i != size-1], # vertex_functions=[random.choice([sympy.And, sympy.Nand, sympy.Or, sympy.Xor]) # for _ in range(size)]) # input_nodes = 0 # for v in G.vertices: # is_input = True # for e in G.edges: # if e[1] == v: # is_input = False # break # if is_input: # input_nodes += 1 # attractor_number = 2**input_nodes # experiments.append(ExperimentParameters(G=G, T=1, P=3, n_attractors=min(3, attractor_number))) # experiments.append(ExperimentParameters(G=G, T=2, P=10, n_attractors=min(10, attractor_number))) # experiments.append(ExperimentParameters(G=G, T=10, P=3, n_attractors=min(3, attractor_number))) # TODO: figure out how disjoint long attractors work together (multiplying doesn't account for offsets) # """test on basic semi-random networks: create connectivity components of acyclis networks and simple cycles""" # n_random_experiment = 0 # while n_random_experiment < 10: # n_components = random.randint(1, 3) # attractor_number = 1 # max_attractor_len = 0 # cur_graph = None # for n_component in range(n_components): # TODO: change to graph union method # comp_size = random.randint(1, 5) # V = [i for i in range(comp_size)] # E = [] # comp_type =random.choice(["cycle", "acyclic"]) # if comp_type == "acyclic": # for i in range(len(V) - 1): # create only forward facing edges # for j in range(i+1, len(V)): # if random.random() <= 0.8: # E.append((V[i], V[j])) # component_graph = graphs.Network(vertex_names=V, edges=E) # restriction_level = random.choice([graphs.FunctionTypeRestriction.NONE, # graphs.FunctionTypeRestriction.SYMMETRIC_THRESHOLD, # graphs.FunctionTypeRestriction.SIMPLE_GATES]) # component_graph.randomize_functions(function_type_restriction=restriction_level) # input_nodes = 
0 # for v in V: # is_input = True # for e in E: # if e[1] == v: # is_input = False # break # if is_input: # input_nodes += 1 # attractor_number *= 2**input_nodes # max_attractor_len = max(max_attractor_len, 1) # elif comp_type == "cycle": # """currently supports only a cycle of identity function, using a group theory theorem from # https://www.quora.com/How-many-unique-binary-matrices-are-there-up-to-rotations-translations-and-flips # , can later add negation cycles""" # for i in range(len(V)): # E.append((V[i], V[(i + 1) % len(V)])) # component_graph = graphs.Network(vertex_names=V, edges=E, vertex_functions=[sympy.And]*len(V)) # attractor_number *= binary_necklaces(len(V)) # max_attractor_len = max(max_attractor_len, len(V)) # cur_graph = component_graph if cur_graph is None else cur_graph + component_graph # if attractor_number * len(cur_graph.vertices) * max_attractor_len <= 250: # experiments.append(ExperimentParameters(G=cur_graph, T=max_attractor_len, # P=attractor_number + 1, # n_attractors=attractor_number)) # n_random_experiment += 1 print "number of experiments (with keys)={}".format(len(experiments)) for i, experiment in enumerate(experiments): print "experiment #{}".format(i) print "n={}, T={}, P={}, expected_n_attractors={}".format(len(experiment.G.vertices), experiment.T, experiment.P, experiment.n_attractors) # continue use_sampling = bool(random.randint(0, 1)) use_sampling_for_mip_start = bool(random.randint(0, 1)) simplify = bool(random.randint(0, 1)) key_slice_size = random.randint(1, 15) print "key_slice_size={}".format(key_slice_size) n_attractors = find_num_attractors_onestage(G=experiment.G, max_len=experiment.T, max_num=experiment.P, use_sat=False, verbose=False, sampling_bounds=(3, 3) if use_sampling else None, use_sampling_for_mip_start=use_sampling_for_mip_start, simplify_general_boolean=simplify, key_slice_size=key_slice_size) try: self.assertEqual(n_attractors, experiment.n_attractors) except AssertionError as e: print e print 
experiment.G raise e except Exception as e: raise e # print "number of experiments (without keys)={}".format(len(experiments)) # for i, experiment in enumerate(experiments): # print "experiment #{}".format(i)h # print "n={}, T={}, P={}, expected_n_attractors={}".format(len(experiment.G.vertices), # experiment.T, experiment.P, experiment.n_attractors) # # continue # n_attractors = find_num_attractors_onestage(G=experiment.G, max_len=experiment.T, max_num=experiment.P, # use_sat=False, verbose=False, # use_state_keys=False, require_result=experiment.n_attractors) # try: # self.assertEqual(n_attractors, experiment.n_attractors) # except AssertionError as e: # print e # print experiment.G # raise e def test_vertex_degeneracy_scores(self): self.assertTrue(False) # TODO: write... def test_graph_state_impact_scores(self): experiments = [] G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.Nand]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #0 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=1, maximal_bits=1, impact=0)) # experiment #1 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=0, maximal_bits=1, impact=0)) # experiment #2 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=30, maximal_bits=1, impact=0)) # experiment #3 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=30, maximal_bits=10, impact=0)) G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")], vertex_functions=[sympy.Nand, None]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #4 
experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=1, maximal_bits=1, impact=0)) # experiment #5 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=0, maximal_bits=1, impact=0)) # experiment #6 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=30, maximal_bits=1, impact=0)) # experiment #7 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=30, maximal_bits=10, impact=0)) G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.And]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #8 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=5, maximal_bits=1, impact=1)) # experiment #9 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=0, maximal_bits=1, impact=1)) # experiment #10 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=5, maximal_bits=5, impact=1)) # experiment #11 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=[0.1, 0.9], max_transient_len=5, maximal_bits=5, impact=1)) G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")], vertex_functions=[sympy.And, None]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #12 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=[0.1, 0.9], 
max_transient_len=5, maximal_bits=5, impact=1)) G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")], vertex_functions=[sympy.Nand, sympy.Nand, sympy.Nand]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #13 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=0, maximal_bits=1, impact=1)) # experiment #14 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=[0.1, 0.9], max_transient_len=5, maximal_bits=5, impact=1)) G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")], vertex_functions=[sympy.Nand, sympy.Nand, sympy.And]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #15 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=0, maximal_bits=1, impact=1)) G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")], vertex_functions=[sympy.Nand, sympy.Nand, lambda _: True]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #16 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=0, maximal_bits=1, impact=0)) # experiment #17 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=0, maximal_bits=3, impact=0)) # experiment #18 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=5, maximal_bits=2, impact=0)) G = graphs.Network(vertex_names=["A", "B", "C", "D"], edges=[("B", "A"), ("C", "A"), ("D", "A"), ("A", "B"), ("C", 
"B"), ("D", "B"), ("A", "C"), ("B", "C"), ("D", "C"), ("A", "D"), ("B", "D"), ("C", "D")], vertex_functions=[lambda a, b, c: a + b + c > 1 for _ in range(4)]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # 0000 and 1111 are stable points, and attract everything with hamming distance <= 1, # where 2 bits of change land right into another attractor. # Other three two-state attractors are unstable under one bit change, with transient length of 1, # Or they can be switched between eachother/stables with 2 (same as 0000/1111 ones, if needed) # bits of change. # experiment #19 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=0, maximal_bits=1, impact=0)) # experiment #20 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=1, maximal_bits=1, impact=3 / 5.0)) # experiment #21 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=5, maximal_bits=1, impact=3 / 5.0)) # experiment #22 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=0, maximal_bits=2, impact=1)) # experiment #23 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=3, maximal_bits=2, impact=1)) relative_basins = [5 / float(16) if len(attractor) == 1 else 2 / float(16) for attractor in current_attractors] # experiment #24 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=relative_basins, max_transient_len=5, maximal_bits=1, impact=6 / 16.0)) # experiment #25 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, 
relative_basins=relative_basins, max_transient_len=0, maximal_bits=2, impact=1)) G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "C")], vertex_functions=[None, sympy.And, sympy.And]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #19 # 000, 110 and 111 are the steady states. First is stable, other can change on # right vertex change, B with one step and C immediately. # experiment #26 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=0, maximal_bits=1, impact=2 / 3.0)) # experiment #27 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=0, maximal_bits=2, impact=2 / 3.0)) # experiment #28 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=5, maximal_bits=5, impact=2 / 3.0)) relative_len_decider = lambda attractor: 0.5 if [ int(s) for s in attractor[0]] == [0, 0, 0] else 3 / float(8) if [ int(s) for s in attractor[0]] == [1, 1, 0] else 1 / float(8) relative_basins = [relative_len_decider(att) for att in current_attractors] # experiment #29 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=relative_basins, max_transient_len=5, maximal_bits=2, impact=0.5)) # experiment #30 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=relative_basins, max_transient_len=0, maximal_bits=1, impact=0.5)) G = graphs.Network(vertex_names=["A", "B", "C", "D"], edges=[("A", "B"), ("B", "C"), ("C", "D"), ("D", "D")], vertex_functions=[None, sympy.And, sympy.And, sympy.And]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # Now 0000 is stable, 1110 changes immediently on 
last vertex change, 1111 can change in 2, 1, or 0 # steps on change of second, third or last vertex. # experiment #31 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=0, maximal_bits=1, impact=2 / 3.0)) # experiment #31 experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=3, maximal_bits=3, impact=2 / 3.0)) print "number of experiments (with keys)={}".format(len(experiments)) for i, experiment in enumerate(experiments): print "experiment #{}".format(i) print "attractors:" print experiment.current_attractors print "n={}, relative_basins={}, expected_impacts={}".\ format(len(experiment.G.vertices), experiment.relative_basins, experiment.impact) impact = graph_state_impact_score(G=experiment.G, current_attractors=experiment.current_attractors, max_transient_len=experiment.max_transient_len, relative_attractor_basin_sizes=experiment.relative_basins, key_slice_size=15, maximal_bits_of_change=experiment.maximal_bits) # (from vertex version) got numeric problems with test #16 regardless of key_slice impact = round(impact, 5) experiment_impact = round(experiment.impact, 5) print "expected impact:" print experiment_impact print "got impact:" print impact try: self.assertEqual(impact, experiment_impact) except AssertionError as e: print e print experiment.G raise e def test_vertex_state_impact_scores(self): # TODO: test stochastic kind experiments = [] G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.Nand]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #0 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=1, impacts=[0])) # experiment #1 experiments.append(VertexStateImpactExperimentParameters(G=G, 
current_attractors=current_attractors, relative_basins=None, max_transient_len=0, impacts=[0])) # experiment #2 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=30, impacts=[0])) # experiment #3 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=[1], max_transient_len=30, impacts=[0])) G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")], vertex_functions=[sympy.Nand, None]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #4 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=30, impacts=[0, np.nan])) G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.And]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #5 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=30, impacts=[1])) # experiment #6 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=30, impacts=[1])) # experiment #7 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=30, impacts=[1])) # experiment #8 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=[0.1, 0.9], max_transient_len=1, impacts=[1])) # experiment #9 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=[0.1, 0.9], max_transient_len=0, impacts=[1])) G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")], vertex_functions=[sympy.And, None]) current_attractors = 
find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #10 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=0, impacts=[1, np.nan])) # experiment #11 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=[0.1, 0.4, 0.4, 0.1], max_transient_len=0, impacts=[1, np.nan])) G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")], vertex_functions=[sympy.Nand, sympy.Nand, sympy.Nand]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #12 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=0, impacts=[1] * 3)) G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")], vertex_functions=[sympy.Nand, sympy.Nand, sympy.And]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #13 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=0, impacts=[1, 1, 1])) G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")], vertex_functions=[sympy.Nand, sympy.Nand, lambda _: True]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #14 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=0, impacts=[0, 0, 0])) G = graphs.Network(vertex_names=["A", "B", "C", "D"], edges=[("B", "A"), ("C", "A"), ("D", "A"), ("A", "B"), ("C", "B"), ("D", "B"), ("A", "C"), ("B", "C"), ("D", "C"), ("A", "D"), ("B", "D"), ("C", "D")], vertex_functions=[lambda a, b, c: a + b + c > 1 for _ in range(4)]) current_attractors = 
find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #15 # 0000 and 1111 are stable points, and attract everything with hamming distance <= 1. # Other three two-state attractors are unstable under one bit change, with transient length of 1. experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=0, impacts=[0] * 4)) # experiment #16 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=1, impacts=[3 / 5.0] * 4)) # experiment #17 relative_basins = [5 / float(16) if len(attractor) == 1 else 2 / float(16) for attractor in current_attractors] experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=relative_basins, max_transient_len=1, impacts=[6 / 16.0, 6 / 16.0, 6 / 16.0, 6 / 16.0])) # experiment #18 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=relative_basins, max_transient_len=2, impacts=[6 / 16.0, 6 / 16.0, 6 / 16.0, 6 / 16.0])) G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "C")], vertex_functions=[None, sympy.And, sympy.And]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #19 # 000, 110 and 111 are the steady states. First is stable, other can change on # right vertex change, B with one step and C immediately. 
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=0, impacts=[np.nan, 0, 2 / 3.0])) # experiment #20 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=1, impacts=[np.nan, 1 / 3.0, 2/ 3.0])) relative_len_decider = lambda attractor: 0.5 if [ int(s) for s in attractor[0]] == [0, 0, 0] else 3 / float(8) if [ int(s) for s in attractor[0]] == [1, 1, 0] else 1 / float(8) relative_basins = [relative_len_decider(att) for att in current_attractors] # experiment #21 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=relative_basins, max_transient_len=1, impacts=[np.nan, 1 / 8.0, 0.5])) G = graphs.Network(vertex_names=["A", "B", "C", "D"], edges=[("A", "B"), ("B", "C"), ("C", "D"), ("D", "D")], vertex_functions=[None, sympy.And, sympy.And, sympy.And]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # Now 0000 is stable, 1110 changes immediently on last vertex change, 1111 can change in 2, 1, or 0 # steps on change of second, third or last vertex. 
# experiment #22 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=0, impacts=[np.nan, 0, 0, 2 / float(3)])) # experiment #23 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=1, impacts=[np.nan, 0, 1 / float(3), 2 / float(3)])) # experiment #24 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=2, impacts=[np.nan, 1 / float(3), 1 / float(3), 2 / float(3)])) # experiment #25 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=3, impacts=[np.nan, 1 / float(3), 1 / float(3), 2 / float(3)])) # experiment #26 experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors, relative_basins=None, max_transient_len=30, impacts=[np.nan, 1 / float(3), 1 / float(3), 2 / float(3)])) print "number of experiments (with keys)={}".format(len(experiments)) for i, experiment in enumerate(experiments): print "experiment #{}".format(i) print "attractors:" print experiment.current_attractors print "n={}, relative_basins={}, expected_impacts={}".\ format(len(experiment.G.vertices), experiment.relative_basins, experiment.impacts) impacts = vertex_state_impact_scores(G=experiment.G, current_attractors=experiment.current_attractors, max_transient_len=experiment.max_transient_len, relative_attractor_basin_sizes=experiment.relative_basins, key_slice_size=15) # got numeric problems with test #16 regardless of key_slice impacts = [round(x, 5) if not np.isnan(x) else x for x in impacts] experiment_impacts = [round(x, 5) if not np.isnan(x) else x for x in experiment.impacts] print "expected impacts:" print impacts print "got impacts:" print experiment_impacts try: self.assertEqual(impacts, 
                             experiment_impacts)
            except AssertionError as e:
                # Dump the offending network before re-raising so the failing
                # experiment can be reproduced from the test log.
                print e
                print experiment.G
                raise e

    def test_graph_model_impact_scores(self):
        """Verify graph_model_impact_score against hand-analyzed small networks.

        Each GraphModelImpactExperimentParameters bundles: the network G, its
        attractors (found with Dubrova's tool), T (max attractor length),
        P (max number of attractors), the impact type, the maximal number of
        model bits allowed to change, optional relative attractor basin sizes,
        and the expected impact score.
        """
        # TODO: also test the resulting models (assure they have the correct number of attractors)
        experiments = []

        # Single vertex negating itself: one oscillating attractor.
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.Nand])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #0
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=3, P=3, impact_types=ImpactType.Invalidation, maximal_bits=1,
            current_attractors=current_attractors, relative_basins=None, impact=1))
        # experiment #1
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=3, P=3, impact_types=ImpactType.Both, maximal_bits=1,
            current_attractors=current_attractors, relative_basins=None, impact=1))
        # experiment #2
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=3, P=3, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impact=2))
        # experiment #3
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=3, P=3, impact_types=ImpactType.Both, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impact=1.5))
        # experiment #4
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=3, P=1, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impact=1))
        # experiment #5
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=3, P=3, impact_types=ImpactType.Both, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=[1], impact=1.5))

        # Single vertex copying itself (AND of one input): two steady states.
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.And])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #6
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=1, P=1, impact_types=ImpactType.Invalidation, maximal_bits=1,
            current_attractors=current_attractors, relative_basins=None, impact=0.5))
        # experiment #7
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=1, P=1, impact_types=ImpactType.Invalidation, maximal_bits=1,
            current_attractors=current_attractors, relative_basins=[0.1, 0.9], impact=0.9))
        # experiment #8
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=1, P=1, impact_types=ImpactType.Invalidation, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impact=1))
        # experiment #9
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=3, P=3, impact_types=ImpactType.Both, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=[0.1, 0.9], impact=0.75))
        # experiment #10
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=3, P=3, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impact=0.5))
        # experiment #11
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=1, P=1, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impact=0))

        # Same AND self-loop plus an isolated input vertex B.
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")], vertex_functions=[sympy.And, None])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #12
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=1, P=1, impact_types=ImpactType.Invalidation, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impact=1))
        # experiment #13
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=3, P=3, impact_types=ImpactType.Both, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=[0.1, 0.4, 0.4, 0.1], impact=0.75))
        # experiment #14
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=3, P=3, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impact=0.5))
        # experiment #15
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=3, P=1, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impact=0.25))
        # experiment #16
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=1, P=1, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impact=0))

        # Three-vertex NAND cycle (negative feedback loop).
        G = graphs.Network(vertex_names=["A", "B", "C"],
                           edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, sympy.Nand])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #17
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=7, P=5, impact_types=ImpactType.Invalidation, maximal_bits=1,
            current_attractors=current_attractors, relative_basins=None, impact=1))
        # experiment #18
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=7, P=5, impact_types=ImpactType.Invalidation, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impact=1))
        # experiment #19
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=6, P=5, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impact=2))
        # experiment #20
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=6, P=3, impact_types=ImpactType.Both, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=[0.1, 0.9], impact=1.25))
        # experiment #21
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=6, P=5, impact_types=ImpactType.Both, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=[0.1, 0.9], impact=1.5))
        # experiment #22
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=6, P=5, impact_types=ImpactType.Addition, maximal_bits=1,
            current_attractors=current_attractors, relative_basins=[0.1, 0.9], impact=0.5))
        # experiment #23
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=1, P=1, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impact=0.5))
        # experiment #24
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=1, P=5, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impact=1))

        # Same cycle with one AND vertex instead of NAND.
        G = graphs.Network(vertex_names=["A", "B", "C"],
                           edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, sympy.And])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #25
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=7, P=5, impact_types=ImpactType.Invalidation, maximal_bits=1,
            current_attractors=current_attractors, relative_basins=None, impact=0.75))
        # experiment #26
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=7, P=5, impact_types=ImpactType.Invalidation, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impact=1))
        # experiment #27
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=7, P=5, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impact=0.5))
        # experiment #28
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=7, P=5, impact_types=ImpactType.Both, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impact=0.75))
        # experiment #29
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=7, P=5, impact_types=ImpactType.Addition, maximal_bits=3,
            current_attractors=current_attractors, relative_basins=None, impact=0.5))
        # experiment #30
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=7, P=5, impact_types=ImpactType.Addition, maximal_bits=4,
            current_attractors=current_attractors, relative_basins=None, impact=1))
        # experiment #31
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=1,
            P=5, impact_types=ImpactType.Addition, maximal_bits=4,
            current_attractors=current_attractors, relative_basins=None, impact=0.5))

        # Same cycle with one constant-True vertex.
        G = graphs.Network(vertex_names=["A", "B", "C"],
                           edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, lambda _: True])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #32
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=7, P=5, impact_types=ImpactType.Invalidation, maximal_bits=1,
            current_attractors=current_attractors, relative_basins=None, impact=1))
        # experiment #33
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=7, P=3, impact_types=ImpactType.Addition, maximal_bits=1,
            current_attractors=current_attractors, relative_basins=None, impact=3))
        # experiment #34
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=7, P=6, impact_types=ImpactType.Addition, maximal_bits=1,
            current_attractors=current_attractors, relative_basins=None, impact=3))
        # experiment #35
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=7, P=6, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impact=3))
        # experiment #36
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=1, P=6, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impact=1))
        # experiment #37 (originally mislabeled "#36"; enumerate prints index 37)
        experiments.append(GraphModelImpactExperimentParameters(
            G=G, T=7, P=6, impact_types=ImpactType.Addition, maximal_bits=3,
            current_attractors=current_attractors, relative_basins=None, impact=4))

        print "number of experiments (with keys)={}".format(len(experiments))
        for i, experiment in enumerate(experiments):
            print "experiment #{}".format(i)
            print "n={}, T={}, P={}, maximal_bits={}, relative_basins={}, expected_impact={}".\
                format(len(experiment.G.vertices), experiment.T, experiment.P,
                       experiment.maximal_bits, experiment.relative_basins, experiment.impact)
            print experiment.current_attractors
            impact = graph_model_impact_score(
                G=experiment.G, current_attractors=experiment.current_attractors,
                max_len=experiment.T, max_num=experiment.P,
                impact_types=experiment.impact_types,
                relative_attractor_basin_sizes=experiment.relative_basins,
                maximal_bits_of_change=experiment.maximal_bits)
            try:
                self.assertEqual(impact, experiment.impact)
            except AssertionError as e:
                # Dump the offending network before re-raising for easier debugging.
                print e
                print experiment.G
                raise e

    def test_vertex_model_impact_scores(self):
        """Verify vertex_model_impact_scores (per-vertex variant) on small networks.

        Expected impacts are listed per vertex; np.nan marks input vertices
        (those with no Boolean function), for which no score is defined.
        """
        # TODO: also test the resulting models (assure they have the correct number of attractors)
        # TODO: test stochastic kind
        experiments = []

        # Single vertex negating itself.
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.Nand])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #0
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=3, P=3, impact_types=ImpactType.Invalidation, maximal_bits=1,
            current_attractors=current_attractors, relative_basins=None, impacts=[1]))
        # experiment #1
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=3, P=3, impact_types=ImpactType.Both, maximal_bits=1,
            current_attractors=current_attractors, relative_basins=None, impacts=[1]))
        # experiment #2
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=3, P=3, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impacts=[2]))
        # experiment #3
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=3, P=3, impact_types=ImpactType.Both, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impacts=[1.5]))
        # experiment #4
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=3, P=1, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impacts=[1]))
        # experiment #5
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=3, P=3, impact_types=ImpactType.Both, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=[1], impacts=[1.5]))

        # Single vertex copying itself.
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.And])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #6
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=1, P=1, impact_types=ImpactType.Invalidation, maximal_bits=1,
            current_attractors=current_attractors, relative_basins=None, impacts=[0.5]))
        # experiment #7
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=1, P=1, impact_types=ImpactType.Invalidation, maximal_bits=1,
            current_attractors=current_attractors, relative_basins=[0.1, 0.9], impacts=[0.9]))
        # experiment #8
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=1, P=1, impact_types=ImpactType.Invalidation, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impacts=[1]))
        # experiment #9
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=3, P=3, impact_types=ImpactType.Both, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=[0.1, 0.9], impacts=[0.75]))
        # experiment #10
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=3, P=3, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impacts=[0.5]))
        # experiment #11
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=1, P=1, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impacts=[0]))

        # AND self-loop plus an isolated input vertex B (impact is nan for B).
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")], vertex_functions=[sympy.And, None])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #12
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=1, P=1, impact_types=ImpactType.Invalidation, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impacts=[1, np.nan]))
        # experiment #13
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=3, P=3, impact_types=ImpactType.Both, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=[0.1, 0.4, 0.4, 0.1],
            impacts=[0.75, np.nan]))
        # experiment #14
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=3, P=3, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impacts=[0.5, np.nan]))
        # experiment #15
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=3, P=1, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impacts=[0.25, np.nan]))
        # experiment #16
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=1, P=1, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impacts=[0, np.nan]))

        # Three-vertex NAND cycle.
        G = graphs.Network(vertex_names=["A", "B", "C"],
                           edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, sympy.Nand])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #17
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=7, P=5, impact_types=ImpactType.Invalidation, maximal_bits=1,
            current_attractors=current_attractors, relative_basins=None, impacts=[1] * 3))
        # experiment #18
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=7, P=5, impact_types=ImpactType.Invalidation, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impacts=[1] * 3))
        # experiment #19
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=6, P=5, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impacts=[2] * 3))
        # experiment #20
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=6, P=3, impact_types=ImpactType.Both, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=[0.1, 0.9],
            impacts=[1.25] * 3))
        # experiment #21
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=6, P=5, impact_types=ImpactType.Both, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=[0.1, 0.9],
            impacts=[1.5] * 3))
        # experiment #22
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=6, P=2, impact_types=ImpactType.Addition, maximal_bits=1,
            current_attractors=current_attractors, relative_basins=None, impacts=[0.5] * 3))
        # experiment #23
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=1, P=1, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impacts=[0.5] * 3))
        # experiment #24
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=1, P=5, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impacts=[1] * 3))

        # Same cycle with one AND vertex.
        G = graphs.Network(vertex_names=["A", "B", "C"],
                           edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, sympy.And])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #25
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=7, P=5, impact_types=ImpactType.Invalidation, maximal_bits=1,
            current_attractors=current_attractors, relative_basins=None,
            impacts=[0.75, 0.75, 0.75]))
        # experiment #26
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=7, P=5, impact_types=ImpactType.Invalidation, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impacts=[1, 1, 1]))
        # experiment #27
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=7, P=5, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None,
            impacts=[0.5, 0.5, 0.5]))
        # experiment #28
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=7, P=5, impact_types=ImpactType.Both, maximal_bits=2,
            current_attractors=current_attractors,
            relative_basins=None, impacts=[0.75, 0.75, 0.75]))

        # Same cycle with one constant-True vertex.
        G = graphs.Network(vertex_names=["A", "B", "C"],
                           edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, lambda _: True])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #29
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=7, P=5, impact_types=ImpactType.Invalidation, maximal_bits=1,
            current_attractors=current_attractors, relative_basins=None, impacts=[1, 1, 1]))
        # experiment #30
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=7, P=5, impact_types=ImpactType.Addition, maximal_bits=1,
            current_attractors=current_attractors, relative_basins=None, impacts=[1, 1, 3]))
        # experiment #31
        experiments.append(VertexModelImpactExperimentParameters(
            G=G, T=7, P=5, impact_types=ImpactType.Addition, maximal_bits=2,
            current_attractors=current_attractors, relative_basins=None, impacts=[1, 1, 3]))

        print "number of experiments (with keys)={}".format(len(experiments))
        for i, experiment in enumerate(experiments):
            print "experiment #{}".format(i)
            print "n={}, T={}, P={}, maximal_bits={}, relative_basins={}, expected_impacts={}".\
                format(len(experiment.G.vertices), experiment.T, experiment.P,
                       experiment.maximal_bits, experiment.relative_basins, experiment.impacts)
            print experiment.current_attractors
            impacts = vertex_model_impact_scores(
                G=experiment.G, current_attractors=experiment.current_attractors,
                max_len=experiment.T, max_num=experiment.P,
                impact_types=experiment.impact_types,
                relative_attractor_basin_sizes=experiment.relative_basins,
                maximal_bits_of_change=experiment.maximal_bits)
            try:
                self.assertEqual(impacts, experiment.impacts)
            except AssertionError as e:
                # Dump the offending network before re-raising for easier debugging.
                print e
                print experiment.G
                raise e

    def test_stochastic_graph_state_impact_scores(self):
        """Compare stochastic_graph_state_impact_score estimates to exact values.

        Estimates are sampled with randomized iteration counts and job counts;
        each must land within 0.1 of the hand-computed impact.
        """
        experiments = []

        # Single vertex negating itself.
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.Nand])
        # experiment #0
        experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=1, impact=0))

        # NAND self-loop plus an isolated input vertex B.
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")], vertex_functions=[sympy.Nand, None])
        # experiment #1
        experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=1, impact=0))

        # Single vertex copying itself.
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.And])
        # experiment #2
        experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=1, impact=1))

        # AND self-loop plus an isolated input vertex B.
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")], vertex_functions=[sympy.And, None])
        # experiment #3
        experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=1, impact=1))

        # Three-vertex NAND cycle.
        G = graphs.Network(vertex_names=["A", "B", "C"],
                           edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, sympy.Nand])
        # experiment #4
        experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=1, impact=0.5))
        # experiment #5
        experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=2, impact=0.5))
        # experiment #6
        experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=3, impact=0))

        # Same cycle with one AND vertex.
        G = graphs.Network(vertex_names=["A", "B", "C"],
                           edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, sympy.And])
        # experiment #7
        experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=1, impact=1))
        # experiment #8
        experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=2, impact=0.5))
        # experiment #9
        experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=3, impact=1))

        # Same cycle with one constant-True vertex.
        G = graphs.Network(vertex_names=["A", "B", "C"],
                           edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, lambda _: True])
        # experiment #10
        experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=1, impact=0))
        # experiment #11
        experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=2, impact=0))
        # experiment #12
        experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=3, impact=0))

        # Fully connected 4-vertex majority network (each vertex follows the
        # majority of the other three).
        G = graphs.Network(vertex_names=["A", "B", "C", "D"],
                           edges=[("B", "A"), ("C", "A"), ("D", "A"),
                                  ("A", "B"), ("C", "B"), ("D", "B"),
                                  ("A", "C"), ("B", "C"), ("D", "C"),
                                  ("A", "D"), ("B", "D"), ("C", "D")],
                           vertex_functions=[lambda a, b, c: a + b + c > 1 for _ in range(4)])
        # experiment #13
        experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=1, impact=3 / 8.0))
        # experiment #14
        experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=2, impact=1))
        # experiment #15
        experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=3, impact=1))
        # experiment #16
        experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=4, impact=10 / 16.0))

        # Chain A -> B -> C with C holding its own state (AND self-loop).
        G = graphs.Network(vertex_names=["A", "B", "C"],
                           edges=[("A", "B"), ("B", "C"), ("C", "C")],
                           vertex_functions=[None, sympy.And, sympy.And])
        # 000, 110 and 111 are the steady states. First is stable, other can change on
        # right vertex change, B with one step and C immediately.
        # experiment #17
        experiments.append(StochasticGraphStateImpactExperimentParameters(
            G=G, bits_of_change=1,
            impact=(3 / 8.0 * 0) + (3 / 8.0 * 0.5) + (1 / 8.0 * 0.5) + (1 / 8.0 * 0)))
        # experiment #18
        experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=2, impact=1 / 16.0))

        # Chain A -> B -> C -> D with D holding its own state.
        G = graphs.Network(vertex_names=["A", "B", "C", "D"],
                           edges=[("A", "B"), ("B", "C"), ("C", "D"), ("D", "D")],
                           vertex_functions=[None, sympy.And, sympy.And, sympy.And])
        # Now 0000 is stable, 1110 changes immediately on last vertex change, 1111 can change in 2, 1, or 0
        # steps on change of second, third or last vertex.
        # experiment #19
        experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=1, impact=0.20833333333))

        print "number of experiments (with keys)={}".format(len(experiments))
        for i, experiment in enumerate(experiments):
            print "experiment #{}".format(i)
            print "n={}, expected_impact={}".\
                format(len(experiment.G.vertices), experiment.impact)
            # Run each estimate several times with randomized sampling budget
            # and parallelism to exercise different code paths.
            for iteration in range(10):
                n_iter = random.randint(700, 1400)
                parallel_n_jobs = random.choice([None, 1, 2, 3])
                estimated_impact = stochastic_graph_state_impact_score(
                    G=experiment.G, n_iter=n_iter,
                    bits_of_change=experiment.bits_of_change,
                    parallel_n_jobs=parallel_n_jobs)
                print "estimated_impact={}".format(estimated_impact)
                # Tolerance of 0.1 around the exact hand-computed impact.
                self.assertTrue(abs(estimated_impact - experiment.impact) < 0.1)

    def test_stochastic_vertex_state_impact_scores(self):
        """Compare stochastic_vertex_state_impact_scores estimates to exact values.

        Expected impacts are listed per vertex; np.nan marks input vertices,
        for which the estimated score must also be nan.
        """
        experiments = []

        # Single vertex negating itself.
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.Nand])
        # experiment #0
        experiments.append(StochasticVertexStateImpactExperimentParameters(G=G, impacts=[0]))

        # NAND self-loop plus an isolated input vertex B.
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")], vertex_functions=[sympy.Nand, None])
        # experiment #1
        experiments.append(StochasticVertexStateImpactExperimentParameters(G=G, impacts=[0, np.nan]))

        # Single vertex copying itself.
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.And])
        # experiment #2
        experiments.append(StochasticVertexStateImpactExperimentParameters(G=G, impacts=[1]))

        # AND self-loop plus an isolated input vertex B.
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")], vertex_functions=[sympy.And, None])
        # experiment #3
        experiments.append(StochasticVertexStateImpactExperimentParameters(G=G, impacts=[1, np.nan]))

        # Three-vertex NAND cycle.
        G = graphs.Network(vertex_names=["A", "B", "C"],
                           edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, sympy.Nand])
        # experiment #4
        experiments.append(StochasticVertexStateImpactExperimentParameters(G=G, impacts=[0.5] * 3))

        # Same cycle with one AND vertex.
        G = graphs.Network(vertex_names=["A", "B", "C"],
                           edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, sympy.And])
        # experiment #5
        experiments.append(StochasticVertexStateImpactExperimentParameters(G=G, impacts=[1, 1, 1]))

        # Same cycle with one constant-True vertex.
        G = graphs.Network(vertex_names=["A", "B", "C"],
                           edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, lambda _: True])
        # experiment #6
        experiments.append(StochasticVertexStateImpactExperimentParameters(G=G, impacts=[0, 0, 0]))

        # Fully connected 4-vertex majority network.
        G = graphs.Network(vertex_names=["A", "B", "C", "D"],
                           edges=[("B", "A"), ("C", "A"), ("D", "A"),
                                  ("A", "B"), ("C", "B"), ("D", "B"),
                                  ("A", "C"), ("B", "C"), ("D", "C"),
                                  ("A", "D"), ("B", "D"), ("C", "D")],
                           vertex_functions=[lambda a, b, c: a + b + c > 1 for _ in range(4)])
        # experiment #7
        experiments.append(StochasticVertexStateImpactExperimentParameters(G=G, impacts=[3 / 8.0] * 4))

        # Chain A -> B -> C with C holding its own state.
        G = graphs.Network(vertex_names=["A", "B", "C"],
                           edges=[("A", "B"), ("B", "C"), ("C", "C")],
                           vertex_functions=[None, sympy.And, sympy.And])
        # experiment #8
        # 000, 110 and 111 are the steady states. First is stable, other can change on
        # right vertex change, B with one step and C immediately.
        experiments.append(StochasticVertexStateImpactExperimentParameters(G=G, impacts=[np.nan, 1/8.0, 0.5]))

        # Chain A -> B -> C -> D with D holding its own state.
        G = graphs.Network(vertex_names=["A", "B", "C", "D"],
                           edges=[("A", "B"), ("B", "C"), ("C", "D"), ("D", "D")],
                           vertex_functions=[None, sympy.And, sympy.And, sympy.And])
        # Now 0000 is stable, 1110 changes immediately on last vertex change, 1111 can change in 2, 1, or 0
        # steps on change of second, third or last vertex.
        # experiment #9
        experiments.append(StochasticVertexStateImpactExperimentParameters(
            G=G, impacts=[np.nan, 1/16.0, 1/16.0, 0.5]))

        print "number of experiments (with keys)={}".format(len(experiments))
        for i, experiment in enumerate(experiments):
            print "experiment #{}".format(i)
            print "n={}, expected_impacts={}".\
                format(len(experiment.G.vertices), experiment.impacts)
            # Randomize sampling budget and parallelism across repetitions.
            for iteration in range(10):
                n_iter = random.randint(700, 1400)
                parallel_n_jobs = random.choice([None, 1, 2, 3])
                estimated_impacts = stochastic_vertex_state_impact_scores(
                    G=experiment.G, n_iter=n_iter,
                    parallel_n_jobs=parallel_n_jobs)
                print "estimated_impacts={}".format(estimated_impacts)
                self.assertTrue(len(experiment.impacts) == len(estimated_impacts))
                for calculated_impact, estimated_impact in zip(experiment.impacts, estimated_impacts):
                    if np.isnan(calculated_impact):
                        # Input vertices have no defined impact; estimate must agree.
                        self.assertTrue(np.isnan(estimated_impact))
                    else:
                        self.assertTrue(abs(estimated_impact - calculated_impact) < 0.1)

    def test_stochastic_graph_model_impact_scores(self):
        """Compare stochastic graph-model impact estimates to exact values."""
        # TODO: also test the resulting models (assure they have the correct number of attractors)
        experiments = []

        # Single vertex negating itself.
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.Nand])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #0
        experiments.append(StochasticGraphModelImpactExperimentParameters(
            G=G, bits_of_change=1, current_attractors=current_attractors,
            relative_basins=None, impact_type=ImpactType.Invalidation, impact=1))
        # experiment #1
        experiments.append(StochasticGraphModelImpactExperimentParameters(
            G=G, bits_of_change=2, current_attractors=current_attractors,
            relative_basins=None, impact_type=ImpactType.Invalidation, impact=1))
        # experiment #2
        experiments.append(StochasticGraphModelImpactExperimentParameters(
            G=G, bits_of_change=1, current_attractors=current_attractors,
            relative_basins=None, impact_type=ImpactType.Addition, impact=1))
        # experiment #3
experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impact=2)) # experiment #4 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Both, impact=1.5)) G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.And]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #5 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impact=0.5)) # experiment #6 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=[0.1, 0.9], impact_type=ImpactType.Invalidation, impact=0.5)) # experiment #7 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impact=1)) # experiment #8 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impact=0)) # experiment #9 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impact=0.5)) # experiment #10 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Both, impact=0.75)) G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")], vertex_functions=[sympy.And, None]) current_attractors = 
find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #11 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impact=0.5)) # experiment #12 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impact=1)) # experiment #13 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impact=0.5)) G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")], vertex_functions=[sympy.Nand, sympy.Nand, sympy.Nand]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #14 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impact=1)) # experiment #15 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impact=1)) # experiment #16 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impact=0.5)) # experiment #17 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impact=(3 / 15.0) * 2 + (12 / 15.0) * 0.5)) # experiment #18 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=3, current_attractors=current_attractors, relative_basins=None, 
impact_type=ImpactType.Addition, impact=0.5)) # experiment #19 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=4, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impact=(3 / 15.0) * 1 + (12 / 15.0) * 0.5)) # experiment #20 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Both, impact=0.75)) # experiment #21 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Both, impact=(3 / 15.0) * 1.5 + (12 / 15.0) * 0.75)) G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")], vertex_functions=[sympy.Nand, sympy.Nand, sympy.And]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #22 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impact=3 / 4.0)) # experiment #23 basin_sizes = [3 / 8.0 if len(att) > 1 else 1 / 8.0 for att in current_attractors] experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=basin_sizes, impact_type=ImpactType.Invalidation, impact=7 / 8.0)) # experiment #24 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impact=(3 / 15.0) * 1 + (12 / 15.0) * (0.5 * 3 / 4.0 + 0.5 * 1))) # experiment #25 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impact=0)) # 
experiment #26 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impact=(3 / 15.0) * 0.5 + (12 / 15.0) * (0.5 * 0 + 0.5 * 0.25))) # experiment #27 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Both, impact=7 / 16.0)) # experiment #28 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Both, impact=(3 / 15.0) * 0.75 + (12 / 15.0) * (0.5 * (3/8.0 + 0) + 0.5 * (3/8.0 + 0.125)))) G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")], vertex_functions=[sympy.Nand, sympy.Nand, lambda _: True]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #29 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impact=0.5)) # experiment #30 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impact=(3 / 15.0) * 1 + (12 / 15.0) * 3 / 4.0)) # experiment #31 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impact=(2 / 3.0 * 0.5 + 1 / 3.0 * 2.5))) # experiment #32 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Both, impact=(2 / 3.0 * 0.5 + 1 / 3.0 * ( 0.5 * 1.5 + 0.5 * 1.5)))) G = 
graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A"), ("B", "B")], vertex_functions=[sympy.And, sympy.And]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #33 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impact=(1 / 3.0 * 0.5 + 2 / 3.0 * 0.25))) # experiment #34 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=[0.1, 0.9], impact_type=ImpactType.Invalidation, impact=(1 / 3.0 * 0.5 + 2 / 3.0 * 0.25))) # experiment #35 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impact=(1 / 15.0 * 1 + 6 / 15.0 * 3.5 / 6.0 + 8 / 15.0 * 5 / 8.0))) # experiment #36 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impact=(1 / 3.0 * 0.25 + 2 / 3.0 * 1 / 8.0))) # experiment #37 experiments.append(StochasticGraphModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impact=(1 / 15.0 * 0.5 + 6 / 15.0 * 1 / 4.0 + 8 / 15.0 * 2 * 0.5 / 8.0))) print "number of experiments (with keys)={}".format(len(experiments)) for i, experiment in enumerate(experiments): print "experiment #{}".format(i) print "n={}, bits_of_change={}, relative_basins={}, impact_type={}, expected_impact={}".\ format(len(experiment.G.vertices), experiment.bits_of_change, experiment.relative_basins, experiment.impact_type, experiment.impact) print experiment.current_attractors for use_dubrova in [False, True]: n_iter = random.randint(800, 880) 
attractor_estimation_n_iter = random.randint(50, 55) parallel_n_jobs = random.choice([None, 1, 2, 3]) estimated_impact = stochastic_graph_model_impact_score( G=experiment.G, current_attractors=experiment.current_attractors, n_iter=n_iter, use_dubrova=use_dubrova, bits_of_change=experiment.bits_of_change, relative_attractor_basin_sizes=experiment.relative_basins, attractor_estimation_n_iter=attractor_estimation_n_iter, impact_type=experiment.impact_type, cur_dubrova_path=dubrova_path, parallel_n_jobs=parallel_n_jobs) print "estimated_impact={}".format(estimated_impact) print "expected_impacts={}".format(experiment.impact) self.assertTrue(abs(estimated_impact - experiment.impact) < 0.15) def test_stochastic_vertex_model_impact_scores(self): # TODO: also test the resulting models (assure they have the correct number of attractors) experiments = [] G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.Nand]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #0 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impacts=[1])) # experiment #1 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impacts=[1])) # experiment #2 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impacts=[1])) # experiment #3 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impacts=[2])) # experiment #4 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, 
bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Both, impacts=[1.5])) G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.And]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #5 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impacts=[0.5])) # experiment #6 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=[0.1, 0.9], impact_type=ImpactType.Invalidation, impacts=[0.5])) # experiment #7 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impacts=[1])) # experiment #8 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impacts=[0])) # experiment #9 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impacts=[0.5])) # experiment #10 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Both, impacts=[0.75])) G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")], vertex_functions=[sympy.And, None]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #11 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, 
impacts=[0.5, np.nan])) # experiment #12 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impacts=[1, np.nan])) # experiment #13 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impacts=[0.5, np.nan])) G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")], vertex_functions=[sympy.Nand, sympy.Nand, sympy.Nand]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #14 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G,bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impacts=[1] * 3)) # experiment #15 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G,bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impacts=[1] * 3)) # experiment #16 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G,bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impacts=[0.5] * 3)) # experiment #17 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G,bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impacts=[2] * 3)) # experiment #18 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G,bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Both, impacts=[0.75] * 3)) # experiment #19 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G,bits_of_change=2, current_attractors=current_attractors, relative_basins=None, 
impact_type=ImpactType.Both, impacts=[1.5] * 3)) G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")], vertex_functions=[sympy.Nand, sympy.Nand, sympy.And]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #20 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impacts=[3 / 4.0] * 3)) # experiment #21 basin_sizes = [3 / 8.0 if len(att) > 1 else 1 / 8.0 for att in current_attractors] experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=basin_sizes, impact_type=ImpactType.Invalidation, impacts=[7 / 8.0] * 3)) # experiment #22 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impacts=[1, 1, 1])) # experiment #23 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impacts=[0] * 3)) # experiment #24 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impacts=[0.5, 0.5, 0.5])) # experiment #25 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Both, impacts=[7 / 16.0] * 3)) # experiment #26 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Both, impacts=[0.75] * 3)) G = graphs.Network(vertex_names=["A", "B", "C"], 
edges=[("A", "B"), ("B", "C"), ("C", "A")], vertex_functions=[sympy.Nand, sympy.Nand, lambda _: True]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #27 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impacts=[0.5] * 3)) # experiment #28 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impacts=[1] * 3)) # experiment #29 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impacts=[0.5, 0.5, 2.5])) # experiment #30 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impacts=[1, 1, 1])) # experiment #31 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Both, impacts=[0.5, 0.5, 1.5])) G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A"), ("B", "B")], vertex_functions=[sympy.And, sympy.And]) current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True) # experiment #32 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impacts=[0.5, 0.25])) # experiment #33 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=[0.1, 0.9], impact_type=ImpactType.Invalidation, impacts=[0.5, 0.25])) # experiment 
#34 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Invalidation, impacts=[1, 0.5])) # experiment #35 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impacts=[0.25, 1 / 8.0])) # experiment #36 experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2, current_attractors=current_attractors, relative_basins=None, impact_type=ImpactType.Addition, impacts=[0.5, 1 / 4.0])) print "number of experiments (with keys)={}".format(len(experiments)) for i, experiment in enumerate(experiments): print "experiment #{}".format(i) print "n={}, bits_of_change={}, relative_basins={}, impact_type={}, expected_impacts={}".\ format(len(experiment.G.vertices), experiment.bits_of_change, experiment.relative_basins, experiment.impact_type, experiment.impacts) print experiment.current_attractors for use_dubrova in [False, True]: n_iter = random.randint(400, 440) attractor_estimation_n_iter = random.randint(30, 35) parallel_n_jobs = random.choice([None, 1, 2, 3]) estimated_impacts = stochastic_vertex_model_impact_scores( G=experiment.G, current_attractors=experiment.current_attractors, n_iter=n_iter, use_dubrova=use_dubrova, bits_of_change=experiment.bits_of_change, relative_attractor_basin_sizes=experiment.relative_basins, attractor_estimation_n_iter=attractor_estimation_n_iter, impact_type=experiment.impact_type, cur_dubrova_path=dubrova_path, parallel_n_jobs=parallel_n_jobs) self.assertTrue(len(experiment.impacts) == len(estimated_impacts)) print "estimated_impacts={}".format(estimated_impacts) for calculated_impact, estimated_impact in zip(experiment.impacts, estimated_impacts): if np.isnan(calculated_impact): self.assertTrue(np.isnan(estimated_impact)) else: self.assertTrue(abs(estimated_impact - 
calculated_impact) < 0.15) def test_find_num_steady_states(self): """test on known toy models""" # 0, 1 G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.Nand]) self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 0) self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=True), 0) G = graphs.Network(vertex_names=["A"], edges=[], vertex_functions=[None]) self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 2) self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=True), 2) G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.And]) self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 2) self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=True), 2) G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")], vertex_functions=[sympy.Nand, sympy.And]) self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 0) self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=True), 0) G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")], vertex_functions=[sympy.Nand, sympy.Nand]) self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 2) G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")], vertex_functions=[lambda x: True, lambda x: False]) self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 1) self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=True), 1) G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")], vertex_functions=[sympy.Nand]*3) self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 0) G = 
graphs.Network(vertex_names=["A", "B", "C", "D"], edges=[("A", "B"), ("B", "C"), ("C", "D"), ("D", "A")], vertex_functions=[sympy.Nand]*4) self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 2) # acyclic, should have 2**#input_nodes attractors of length 1 G = graphs.Network(vertex_names=["v1", "v2", "v3", "v4", "v5", "v6"], edges=[("v1", "v4"), ("v2", "v4"), ("v1", "v5"), ("v4", "v6")], vertex_functions=[sympy.Nand]*6) self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 8) G = graphs.Network(vertex_names=["A1", "B1", "B2", "C1", "C2"], edges=[("A1", "A1"), ("B1", "B2"), ("B2", "B1"), ("C1", "C2"), ("C2", "C1")], vertex_functions=[sympy.And]*5) G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")], vertex_functions=[sympy.Nand]*3) self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 0) G = graphs.Network.parse_cnet("C:\\Users\\ariel\\Downloads\\Attractors - for Ariel" "\\Attractors - for Ariel\\BNS_Dubrova_2011\\tcr.cnet") self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 8) def test_find_attractors_dubrova(self): experiments = [] """test on known toy models""" # 0, 1 G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.Nand]) experiments.append(DubrovaExperimentParameters(G=G, mutate=False, n_attractors=1)) experiments.append(DubrovaExperimentParameters(G=G, mutate=True, n_attractors=1)) # 2 G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[logic.SymmetricThresholdFunction(signs=[-1], threshold=1)]) experiments.append(DubrovaExperimentParameters(G=G, mutate=False, n_attractors=1)) # 3, 4 G = graphs.Network(vertex_names=["A"], edges=[], vertex_functions=[None]) experiments.append(DubrovaExperimentParameters(G=G, mutate=False, n_attractors=1)) experiments.append(DubrovaExperimentParameters(G=G, mutate=True, 
n_attractors=2))
        # 5, 6
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")], vertex_functions=[sympy.Nand, sympy.And])
        experiments.append(DubrovaExperimentParameters(G=G, mutate=False, n_attractors=1))
        experiments.append(DubrovaExperimentParameters(G=G, mutate=True, n_attractors=1))
        # 7, 8
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")], vertex_functions=[lambda x: True, lambda x: False])
        experiments.append(DubrovaExperimentParameters(G=G, mutate=False, n_attractors=1))
        experiments.append(DubrovaExperimentParameters(G=G, mutate=True, n_attractors=1))
        # 9, 10
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "A"), ("C", "A")], vertex_functions=[logic.SymmetricThresholdFunction.from_function(sympy.Nand, 2), logic.SymmetricThresholdFunction.from_function(sympy.Nand, 1), True])
        experiments.append(DubrovaExperimentParameters(G=G, mutate=False, n_attractors=3))
        experiments.append(DubrovaExperimentParameters(G=G, mutate=True, n_attractors=3))
        # 11, 12
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "A"), ("C", "A")], vertex_functions=[logic.SymmetricThresholdFunction.from_function(sympy.Nand, 2), logic.SymmetricThresholdFunction.from_function(sympy.Nand, 1), False])
        experiments.append(DubrovaExperimentParameters(G=G, mutate=False, n_attractors=1))
        experiments.append(DubrovaExperimentParameters(G=G, mutate=True, n_attractors=1))
        # 13, 14
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "A"), ("C", "A")], vertex_functions=[logic.SymmetricThresholdFunction.from_function(sympy.Nand, 2), logic.SymmetricThresholdFunction.from_function(sympy.Nand, 1), None])
        experiments.append(DubrovaExperimentParameters(G=G, mutate=False, n_attractors=1))
        experiments.append(DubrovaExperimentParameters(G=G, mutate=True, n_attractors=4))
        # 15
        # NOTE(review): hard-coded absolute Windows user path; consider a repo-relative
        # fixture path -- confirm with maintainer.
        G = graphs.Network.parse_cnet("C:\\Users\\ariel\\Downloads\\Attractors - for Ariel"
                                      "\\Attractors - for Ariel\\BNS_Dubrova_2011\\tcr.cnet")
        # G = graphs.Network.parse_cnet("C:\\Users\\ariel\\Downloads\\Attractors - for Ariel"
        #                               "\\Attractors - for Ariel\\BNS_Dubrova_2011\\MAPK_large.cnet")
        experiments.append(DubrovaExperimentParameters(G=G, mutate=False, n_attractors=9))
        print "number of experiments (with keys)={}".format(len(experiments))
        for i, experiment in enumerate(experiments):
            print "experiment #{}".format(i)
            print "n={}, mutate={}, expected_n_attractors={}".format(len(experiment.G.vertices), experiment.mutate, experiment.n_attractors)
            # continue
            # NOTE(review): literal "../bns_dubrova.exe" here, while other tests in this
            # file pass the module-level dubrova_path -- presumably the same binary; unify?
            attractors = find_attractors_dubrova(G=experiment.G, dubrova_path="../bns_dubrova.exe", mutate_input_nodes=experiment.mutate)
            n_attractors = len(attractors)
            try:
                self.assertEqual(n_attractors, experiment.n_attractors)
            except AssertionError as e:
                # dump the offending graph before re-raising, for easier debugging
                print e
                print experiment.G
                raise e
            # NOTE(review): this clause is a no-op and (in Python 2) "raise e" resets the
            # traceback; consider removing it.
            except Exception as e:
                raise e
        print "testing state order in attractor"
        # TODO: expand? random graphs, compare ILP attractors with Dubrova's
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "A")], vertex_functions=[sympy.And, sympy.Nand, True])
        desired_attractor = [[0, 0, 1], [0, 1, 1], [1, 1, 1], [1, 0, 1]]
        # repeat manually, (otherwise there's mutual dependence of tests).
        # any cyclic rotation of the desired attractor is an acceptable output ordering
        possible_attractors = [desired_attractor[shift:] + desired_attractor[:shift] for shift in range(4)]
        # print possible_attractors
        found_attractors = find_attractors_dubrova(G, dubrova_path="../bns_dubrova.exe", mutate_input_nodes=True)
        self.assertTrue(len(found_attractors) == 1)
        found_attractor = [[int(v) for v in state] for state in found_attractors[0]]
        # print found_attractor
        self.assertTrue(any(found_attractor == possible_attractors[i] for i in range(len(possible_attractors))))
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")], vertex_functions=[sympy.And, sympy.Nand])
        desired_attractor = [[0, 0], [0, 1], [1, 1], [1, 0]]
        # repeat manually, (otherwise there's mutual dependence of tests).
        possible_attractors = [desired_attractor[shift:] + desired_attractor[:shift] for shift in range(4)]
        # print possible_attractors
        found_attractors = find_attractors_dubrova(G, dubrova_path="../bns_dubrova.exe", mutate_input_nodes=True)
        self.assertTrue(len(found_attractors) == 1)
        found_attractor = [[int(v) for v in state] for state in found_attractors[0]]
        # print found_attractor
        self.assertTrue(any(found_attractor == possible_attractor for possible_attractor in possible_attractors))

    def test_find_attractors_enumerate(self):
        experiments = []
        """test on known toy models"""
        # 0, 1
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.Nand])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=0))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=None, n_attractors=1))
        # 2, 3
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[logic.SymmetricThresholdFunction(signs=[-1], threshold=1)])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=0))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=None, n_attractors=1))
        # 4, 5
        G = graphs.Network(vertex_names=["A"], edges=[], vertex_functions=[None])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=2))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=None, n_attractors=2))
        # 6, 7
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")], vertex_functions=[logic.SymmetricThresholdFunction(signs=[-1], threshold=1), None])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=0))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=None, n_attractors=2))
        # 8, 9
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")], vertex_functions=[sympy.And])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=2))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=3,
P=None, n_attractors=2)) # 10, 11 G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")], vertex_functions=[sympy.Nand, sympy.And]) experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=None, n_attractors=0)) experiments.append(ILPAttractorExperimentParameters(G=G, T=4, P=None, n_attractors=1)) # 12, 13, 14 G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")], vertex_functions=[sympy.Nand, sympy.Nand]) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=2)) experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=None, n_attractors=3)) experiments.append(ILPAttractorExperimentParameters(G=G, T=15, P=None, n_attractors=3)) # 15, 16 G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")], vertex_functions=[lambda x: True, lambda x: False]) experiments.append(ILPAttractorExperimentParameters(G=G, T=4, P=None, n_attractors=1)) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=1)) # 17, 18, 19 G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "A"), ("C", "A")], vertex_functions=[logic.SymmetricThresholdFunction.from_function(sympy.Nand, 2), logic.SymmetricThresholdFunction.from_function(sympy.Nand, 1), logic.SymmetricThresholdFunction.from_function(sympy.Nand, 0)]) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=3)) experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=None, n_attractors=4)) experiments.append(ILPAttractorExperimentParameters(G=G, T=3, P=None, n_attractors=4)) # 20 G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "A"), ("C", "A")], vertex_functions=[logic.SymmetricThresholdFunction.from_function(sympy.Nand, 2), logic.SymmetricThresholdFunction.from_function(sympy.Nand, 1), None]) experiments.append(ILPAttractorExperimentParameters(G=G, T=3, P=None, n_attractors=4)) # 21, 22, 23 G = graphs.Network(vertex_names=["A", "B", 
"C"], edges=[("A", "B"), ("B", "C"), ("C", "A")], vertex_functions=[sympy.Nand]*3) experiments.append(ILPAttractorExperimentParameters(G=G, T=6, P=None, n_attractors=2)) experiments.append(ILPAttractorExperimentParameters(G=G, T=10, P=None, n_attractors=2)) experiments.append(ILPAttractorExperimentParameters(G=G, T=5, P=None, n_attractors=1)) # 24, 25 # acyclic, should have 2**#input_nodes attractors of length 1 G = graphs.Network(vertex_names=["v1", "v2", "v3", "v4", "v5", "v6"], edges=[("v1", "v4"), ("v2", "v4"), ("v1", "v5"), ("v4", "v6")], vertex_functions=[sympy.Nand]*6) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=8)) experiments.append(ILPAttractorExperimentParameters(G=G, T=6, P=None, n_attractors=8)) # 26, 27 G = graphs.Network(vertex_names=["A1", "B1", "B2", "C1", "C2"], edges=[("A1", "A1"), ("B1", "B2"), ("B2", "B1"), ("C1", "C2"), ("C2", "C1")], vertex_functions=[sympy.And]*5) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=8)) experiments.append(ILPAttractorExperimentParameters(G=G, T=3, P=None, n_attractors=20)) # offsets! 
# 28, 29 # a failed random graph added as a constant test G = graphs.Network( vertex_names=['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34'], edges=[('1', '2'), ('2', '16'), ('3', '17'), ('5', '15'), ('6', '29'), ('7', '28'), ('8', '22'), ('9', '28'), ('10', '18'), ('11', '15'), ('12', '24'), ('13', '14'), ('15', '18'), ('16', '26'), ('17', '27'), ('18', '20'), ('19', '23'), ('20', '27'), ('23', '26'), ('24', '29'), ('25', '33'), ('26', '30'), ('27', '32'), ('28', '32'), ('30', '32'), ('31', '34'), ('32', '33'), ('33', '34')], vertex_functions=[None, None, sympy.Nand, None, None, None, None, None, None, None, None, None, None, None, sympy.Or, sympy.Nand, sympy.Nand, sympy.Nand, sympy.Nand, None, sympy.Xor, None, sympy.And, sympy.Nand, sympy.Xor, None, sympy.And, sympy.Nand, sympy.And, sympy.Xor, sympy.Or, None, sympy.Or, sympy.And, sympy.And]) experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=2**17)) experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=None, n_attractors=2**17)) # 30, 31, 32, 33 G = graphs.Network.parse_cnet("C:\\Users\\ariel\\Downloads\\Attractors - for Ariel" "\\Attractors - for Ariel\\BNS_Dubrova_2011\\tcr.cnet") experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=8)) experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=None, n_attractors=8)) experiments.append(ILPAttractorExperimentParameters(G=G, T=6, P=None, n_attractors=9)) experiments.append(ILPAttractorExperimentParameters(G=G, T=8, P=None, n_attractors=9)) print "number of experiments (with keys)={}".format(len(experiments)) for i, experiment in enumerate(experiments): print "experiment #{}".format(i) print "n={}, T={}, expected_n_attractors={}".format(len(experiment.G.vertices), experiment.T, experiment.n_attractors) # continue simplify = 
bool(random.randint(0, 1)) key_slice_size = random.randint(1, 15) print "key_slice_size={}".format(key_slice_size) n_attractors = len(find_attractors_onestage_enumeration(G=experiment.G, max_len=experiment.T, verbose=False, simplify_general_boolean=simplify, key_slice_size=key_slice_size)) try: self.assertEqual(n_attractors, experiment.n_attractors) except AssertionError as e: print e print experiment.G raise e except Exception as e: raise e # TODO: add dubrova v.s. ILP testing again.<|fim▁end|>
experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=6, n_attractors=2)) # 18, 19, 20 G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")],
<|file_name|>0003_auto_20150607_1606.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import unicode_literals from acl.models import Role from communities.models import Community, CommunityGroup, Committee, CommunityGroupRole from django.db import models, migrations <|fim▁hole|> committees = Committee.objects.all() for c in communities: # Creating groups to existing communities, similar to what they have before. CommunityGroup.objects.bulk_create([ CommunityGroup(community=c, title='chairman', _order=0), CommunityGroup(community=c, title='board', _order=1), CommunityGroup(community=c, title='member', _order=2), CommunityGroup(community=c, title='administrator', _order=3) ]) # Creating roles for existing communities, similar to what they have before. Role.objects.bulk_create([ Role(community=c, title='chairman', based_on='manager'), Role(community=c, title='board', based_on='participant'), Role(community=c, title='member', based_on='observer') ]) for c in committees: # Creating community group roles. CommunityGroupRole.objects.bulk_create([ CommunityGroupRole(committee=c, role=c.community.roles.get(title="chairman"), group=c.community.groups.get(title="chairman")), CommunityGroupRole(committee=c, role=c.community.roles.get(title="board"), group=c.community.groups.get(title="board")), CommunityGroupRole(committee=c, role=c.community.roles.get(title="member"), group=c.community.groups.get(title="member")) ]) class Migration(migrations.Migration): dependencies = [ ('users', '0002_auto_20150607_1350'), ] operations = [ migrations.RunPython(create_default_groups), ]<|fim▁end|>
def create_default_groups(apps, schema_editor): Membership = apps.get_model("users", "Membership") communities = Community.objects.all()
<|file_name|>0002_auto_20160512_1041.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Generated by Django 1.9.6 on 2016-05-12 08:41 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('cases', '0001_initial'), ('offices', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.AddField( model_name='case', name='created_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ),<|fim▁hole|> field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='offices.Office'), ), ]<|fim▁end|>
migrations.AddField( model_name='case', name='office',
<|file_name|>service.py<|end_file_name|><|fim▁begin|># This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright (c) Ansible Inc, 2016 # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import glob import os import pickle import platform import select import shlex import subprocess import traceback from ansible.module_utils.six import PY2, b from ansible.module_utils._text import to_bytes, to_text def sysv_is_enabled(name): ''' This function will check if the service name supplied is enabled in any of the sysv runlevels :arg name: name of the service to test for ''' return bool(glob.glob('/etc/rc?.d/S??%s' % name)) def get_sysv_script(name): ''' This function will return the expected path for an init script corresponding to the service name supplied. :arg name: name or path of the service to test for ''' if name.startswith('/'): result = name else: result = '/etc/init.d/%s' % name return result def sysv_exists(name): ''' This function will return True or False depending on the existence of an init script corresponding to the service name supplied. :arg name: name of the service to test for ''' return os.path.exists(get_sysv_script(name)) def fail_if_missing(module, found, service, msg=''): ''' This function will return an error or exit gracefully depending on check mode status and if the service is missing or not. :arg module: is an AnsibleModule object, used for it's utility methods :arg found: boolean indicating if services was found or not :arg service: name of service :kw msg: extra info to append to error/success msg when missing ''' if not found: if module.check_mode: module.exit_json(msg="Service %s not found on %s, assuming it will exist on full run" % (service, msg), changed=True) else: module.fail_json(msg='Could not find the requested service %s: %s' % (service, msg)) def daemonize(module, cmd): ''' Execute a command while detaching as a daemon, returns rc, stdout, and stderr. :arg module: is an AnsibleModule object, used for it's utility methods :arg cmd: is a list or string representing the command and options to run This is complex because daemonization is hard for people. 
What we do is daemonize a part of this module, the daemon runs the command, picks up the return code and output, and returns it to the main process. ''' # init some vars chunk = 4096 # FIXME: pass in as arg? errors = 'surrogate_or_strict' # start it! try: pipe = os.pipe() pid = os.fork() except OSError: module.fail_json(msg="Error while attempting to fork: %s", exception=traceback.format_exc()) # we don't do any locking as this should be a unique module/process if pid == 0: os.close(pipe[0]) # Set stdin/stdout/stderr to /dev/null fd = os.open(os.devnull, os.O_RDWR) # clone stdin/out/err for num in range(3): if fd != num: os.dup2(fd, num) # close otherwise if fd not in range(3): os.close(fd) # Make us a daemon pid = os.fork() # end if not in child if pid > 0: os._exit(0) # get new process session and detach sid = os.setsid() if sid == -1: module.fail_json(msg="Unable to detach session while daemonizing") # avoid possible problems with cwd being removed os.chdir("/") pid = os.fork() if pid > 0: os._exit(0) # if command is string deal with py2 vs py3 conversions for shlex if not isinstance(cmd, list): if PY2: cmd = shlex.split(to_bytes(cmd, errors=errors)) else: cmd = shlex.split(to_text(cmd, errors=errors)) # make sure we always use byte strings run_cmd = [] for c in cmd: run_cmd.append(to_bytes(c, errors=errors)) # execute the command in forked process p = subprocess.Popen(run_cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=lambda: os.close(pipe[1])) fds = [p.stdout, p.stderr] # loop reading output till its done output = {p.stdout: b(""), p.sterr: b("")} while fds: rfd, wfd, efd = select.select(fds, [], fds, 1) if (rfd + wfd + efd) or p.poll(): for out in fds: if out in rfd: data = os.read(out.fileno(), chunk) if not data: fds.remove(out) output[out] += b(data) # even after fds close, we might want to wait for pid to die p.wait() # Return a pickled data of parent return_data = pickle.dumps([p.returncode, to_text(output[p.stdout]), 
to_text(output[p.stderr])], protocol=pickle.HIGHEST_PROTOCOL) os.write(pipe[1], to_bytes(return_data, errors=errors)) # clean up os.close(pipe[1]) os._exit(0) elif pid == -1: module.fail_json(msg="Unable to fork, no exception thrown, probably due to lack of resources, check logs.") else: # in parent os.close(pipe[1]) os.waitpid(pid, 0) # Grab response data after child finishes return_data = b("") while True: rfd, wfd, efd = select.select([pipe[0]], [], [pipe[0]]) if pipe[0] in rfd: data = os.read(pipe[0], chunk) if not data: break return_data += b(data) # Note: no need to specify encoding on py3 as this module sends the # pickle to itself (thus same python interpreter so we aren't mixing # py2 and py3)<|fim▁hole|> # Set ps flags if platform.system() == 'SunOS': psflags = '-ef' else: psflags = 'auxww' # Find ps binary psbin = module.get_bin_path('ps', True) (rc, out, err) = module.run_command('%s %s' % (psbin, psflags)) # If rc is 0, set running as appropriate if rc == 0: for line in out.split('\n'): if pattern in line: return True return False<|fim▁end|>
return pickle.loads(to_bytes(return_data, errors=errors)) def check_ps(module, pattern):
<|file_name|>bitcoin_pt_PT.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="pt_PT" version="2.1"> <context> <name>AboutDialog</name> <message> <location filename="../forms/aboutdialog.ui" line="+14"/> <source>About LVGcoin</source> <translation type="unfinished"/> </message> <message> <location line="+39"/> <source>&lt;b&gt;LVGcoin&lt;/b&gt; version</source> <translation type="unfinished"/> </message> <message> <location line="+41"/> <source>Copyright © 2009-2014 The Bitcoin developers Copyright © 2012-2014 The NovaCoin developers Copyright © 2015 The LVGcoin developers</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source> This is experimental software. Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source> <translation> Este é um programa experimental. Distribuído sob uma licença de software MIT/X11, por favor verifique o ficheiro anexo license.txt ou http://www.opensource.org/licenses/mit-license.php. 
Este produto inclui software desenvolvido pelo Projecto OpenSSL para uso no OpenSSL Toolkit (http://www.openssl.org/), software criptográfico escrito por Eric Young ([email protected]) e software UPnP escrito por Thomas Bernard.</translation> </message> </context> <context> <name>AddressBookPage</name> <message> <location filename="../forms/addressbookpage.ui" line="+14"/> <source>Address Book</source> <translation type="unfinished"/> </message> <message> <location line="+22"/> <source>Double-click to edit address or label</source> <translation>Clique duas vezes para editar o endereço ou o rótulo</translation> </message> <message> <location line="+27"/> <source>Create a new address</source> <translation>Criar um novo endereço</translation> </message> <message> <location line="+14"/> <source>Copy the currently selected address to the system clipboard</source> <translation>Copie o endereço selecionado para a área de transferência</translation> </message> <message> <location line="-11"/> <source>&amp;New Address</source> <translation type="unfinished"/> </message> <message> <location line="-46"/> <source>These are your LVGcoin addresses for receiving payments. 
You may want to give a different one to each sender so you can keep track of who is paying you.</source> <translation type="unfinished"/> </message> <message> <location line="+60"/> <source>&amp;Copy Address</source> <translation>&amp;Copiar Endereço</translation> </message> <message> <location line="+11"/> <source>Show &amp;QR Code</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Sign a message to prove you own a LVGcoin address</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Delete the currently selected address from the list</source> <translation>Apagar o endereço selecionado da lista</translation> </message> <message> <location line="-14"/> <source>Verify a message to ensure it was signed with a specified LVGcoin address</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Verify Message</source> <translation type="unfinished"/> </message> <message> <location line="+14"/> <source>&amp;Delete</source> <translation>E&amp;liminar</translation> </message> <message> <location filename="../addressbookpage.cpp" line="+65"/> <source>Copy &amp;Label</source> <translation>Copiar &amp;Rótulo</translation> </message> <message> <location line="+2"/> <source>&amp;Edit</source> <translation>&amp;Editar</translation> </message> <message> <location line="+250"/> <source>Export Address Book Data</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>Ficheiro separado por vírgulas (*.csv)</translation> </message> <message> <location line="+13"/> <source>Error exporting</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation type="unfinished"/> </message> </context> 
<context> <name>AddressTableModel</name> <message> <location filename="../addresstablemodel.cpp" line="+144"/> <source>Label</source> <translation>Rótulo</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>Endereço</translation> </message> <message> <location line="+36"/> <source>(no label)</source> <translation>(sem rótulo)</translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <location filename="../forms/askpassphrasedialog.ui" line="+26"/> <source>Passphrase Dialog</source> <translation>Diálogo de Frase-Passe</translation> </message> <message> <location line="+21"/> <source>Enter passphrase</source> <translation>Escreva a frase de segurança</translation> </message> <message> <location line="+14"/> <source>New passphrase</source> <translation>Nova frase de segurança</translation> </message> <message> <location line="+14"/> <source>Repeat new passphrase</source> <translation>Repita a nova frase de segurança</translation> </message> <message> <location line="+33"/> <source>Serves to disable the trivial sendmoney when OS account compromised. Provides no real security.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>For staking only</source> <translation type="unfinished"/> </message> <message> <location filename="../askpassphrasedialog.cpp" line="+35"/> <source>Enter the new passphrase to the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;10 or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation>Escreva a nova frase de seguraça da sua carteira. 
&lt;br/&gt; Por favor, use uma frase de &lt;b&gt;10 ou mais caracteres aleatórios,&lt;/b&gt; ou &lt;b&gt;oito ou mais palavras&lt;/b&gt;.</translation> </message> <message> <location line="+1"/> <source>Encrypt wallet</source> <translation>Encriptar carteira</translation> </message> <message> <location line="+7"/> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation>A sua frase de segurança é necessária para desbloquear a carteira.</translation> </message> <message> <location line="+5"/> <source>Unlock wallet</source> <translation>Desbloquear carteira</translation> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation>A sua frase de segurança é necessária para desencriptar a carteira.</translation> </message> <message> <location line="+5"/> <source>Decrypt wallet</source> <translation>Desencriptar carteira</translation> </message> <message> <location line="+3"/> <source>Change passphrase</source> <translation>Alterar frase de segurança</translation> </message> <message> <location line="+1"/> <source>Enter the old and new passphrase to the wallet.</source> <translation>Escreva a frase de segurança antiga seguida da nova para a carteira.</translation> </message> <message> <location line="+46"/> <source>Confirm wallet encryption</source> <translation>Confirmar encriptação da carteira</translation> </message> <message> <location line="+1"/> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR COINS&lt;/b&gt;!</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Are you sure you wish to encrypt your wallet?</source> <translation>Tem a certeza que deseja encriptar a carteira?</translation> </message> <message> <location line="+15"/> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, 
encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation>IMPORTANTE: Qualquer cópia de segurança anterior da carteira deverá ser substituída com o novo, actualmente encriptado, ficheiro de carteira. Por razões de segurança, cópias de segurança não encriptadas efectuadas anteriormente do ficheiro da carteira tornar-se-ão inúteis assim que começar a usar a nova carteira encriptada.</translation> </message> <message> <location line="+103"/> <location line="+24"/> <source>Warning: The Caps Lock key is on!</source> <translation>Atenção: A tecla Caps Lock está activa!</translation> </message> <message> <location line="-133"/> <location line="+60"/> <source>Wallet encrypted</source> <translation>Carteira encriptada</translation> </message> <message> <location line="-58"/> <source>LVGcoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <location line="+7"/> <location line="+44"/> <location line="+6"/> <source>Wallet encryption failed</source> <translation>A encriptação da carteira falhou</translation> </message> <message> <location line="-56"/> <source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source> <translation>A encriptação da carteira falhou devido a um erro interno. 
A carteira não foi encriptada.</translation> </message> <message> <location line="+7"/> <location line="+50"/> <source>The supplied passphrases do not match.</source> <translation>As frases de segurança fornecidas não coincidem.</translation> </message> <message> <location line="-38"/> <source>Wallet unlock failed</source> <translation>O desbloqueio da carteira falhou</translation> </message> <message> <location line="+1"/> <location line="+12"/> <location line="+19"/> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation>A frase de segurança introduzida para a desencriptação da carteira estava incorreta.</translation> </message> <message> <location line="-20"/> <source>Wallet decryption failed</source> <translation>A desencriptação da carteira falhou</translation> </message> <message> <location line="+14"/> <source>Wallet passphrase was successfully changed.</source> <translation>A frase de segurança da carteira foi alterada com êxito.</translation> </message> </context> <context> <name>BitcoinGUI</name> <message> <location filename="../bitcoingui.cpp" line="+282"/> <source>Sign &amp;message...</source> <translation>Assinar &amp;mensagem...</translation> </message> <message> <location line="+251"/> <source>Synchronizing with network...</source> <translation>Sincronizando com a rede...</translation> </message> <message> <location line="-319"/> <source>&amp;Overview</source> <translation>Visã&amp;o geral</translation> </message> <message> <location line="+1"/> <source>Show general overview of wallet</source> <translation>Mostrar visão geral da carteira</translation> </message> <message> <location line="+17"/> <source>&amp;Transactions</source> <translation>&amp;Transações</translation> </message> <message> <location line="+1"/> <source>Browse transaction history</source> <translation>Navegar pelo histórico de transações</translation> </message> <message> <location line="+5"/> <source>&amp;Address Book</source> <translation 
type="unfinished"/> </message> <message> <location line="+1"/> <source>Edit the list of stored addresses and labels</source> <translation type="unfinished"/> </message> <message> <location line="-13"/> <source>&amp;Receive coins</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show the list of addresses for receiving payments</source> <translation type="unfinished"/> </message> <message> <location line="-7"/> <source>&amp;Send coins</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>E&amp;xit</source> <translation>Fec&amp;har</translation> </message> <message> <location line="+1"/> <source>Quit application</source> <translation>Sair da aplicação</translation> </message> <message> <location line="+6"/> <source>Show information about LVGcoin</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>About &amp;Qt</source> <translation>Sobre &amp;Qt</translation> </message> <message> <location line="+1"/> <source>Show information about Qt</source> <translation>Mostrar informação sobre Qt</translation> </message> <message> <location line="+2"/> <source>&amp;Options...</source> <translation>&amp;Opções...</translation> </message> <message> <location line="+4"/> <source>&amp;Encrypt Wallet...</source> <translation>E&amp;ncriptar Carteira...</translation> </message> <message> <location line="+3"/> <source>&amp;Backup Wallet...</source> <translation>&amp;Guardar Carteira...</translation> </message> <message> <location line="+2"/> <source>&amp;Change Passphrase...</source> <translation>Mudar &amp;Palavra-passe...</translation> </message> <message numerus="yes"> <location line="+259"/> <source>~%n block(s) remaining</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+6"/> <source>Downloaded %1 of %2 blocks of transaction history (%3% done).</source> <translation 
type="unfinished"/> </message> <message> <location line="-256"/> <source>&amp;Export...</source> <translation type="unfinished"/> </message> <message> <location line="-64"/> <source>Send coins to a LVGcoin address</source> <translation type="unfinished"/> </message> <message> <location line="+47"/> <source>Modify configuration options for LVGcoin</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>Export the data in the current tab to a file</source> <translation type="unfinished"/> </message> <message> <location line="-14"/> <source>Encrypt or decrypt wallet</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Backup wallet to another location</source> <translation>Faça uma cópia de segurança da carteira para outra localização</translation> </message> <message> <location line="+2"/> <source>Change the passphrase used for wallet encryption</source> <translation>Mudar a frase de segurança utilizada na encriptação da carteira</translation> </message> <message> <location line="+10"/> <source>&amp;Debug window</source> <translation>Janela de &amp;depuração</translation> </message> <message> <location line="+1"/> <source>Open debugging and diagnostic console</source> <translation>Abrir consola de diagnóstico e depuração</translation> </message> <message> <location line="-5"/> <source>&amp;Verify message...</source> <translation>&amp;Verificar mensagem...</translation> </message> <message> <location line="-202"/> <source>LVGcoin</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Wallet</source> <translation>Carteira</translation> </message> <message> <location line="+180"/> <source>&amp;About LVGcoin</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>&amp;Show / Hide</source> <translation>Mo&amp;strar / Ocultar</translation> </message> <message> <location line="+9"/> <source>Unlock wallet</source> 
<translation type="unfinished"/> </message> <message> <location line="+1"/> <source>&amp;Lock Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Lock wallet</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>&amp;File</source> <translation>&amp;Ficheiro</translation> </message> <message> <location line="+8"/> <source>&amp;Settings</source> <translation>Con&amp;figurações</translation> </message> <message> <location line="+8"/> <source>&amp;Help</source> <translation>A&amp;juda</translation> </message> <message> <location line="+12"/> <source>Tabs toolbar</source> <translation>Barra de separadores</translation> </message> <message> <location line="+8"/> <source>Actions toolbar</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <location line="+9"/> <source>[testnet]</source> <translation>[rede de testes]</translation> </message> <message> <location line="+0"/> <location line="+60"/> <source>LVGcoin client</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+75"/> <source>%n active connection(s) to LVGcoin network</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+40"/> <source>Downloaded %1 blocks of transaction history.</source> <translation type="unfinished"/> </message> <message> <location line="+413"/> <source>Staking.&lt;br&gt;Your weight is %1&lt;br&gt;Network weight is %2&lt;br&gt;Expected time to earn reward is %3</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Not staking because wallet is locked</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Not staking because wallet is offline</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Not staking because wallet is 
syncing</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Not staking because you don&apos;t have mature coins</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="-403"/> <source>%n second(s) ago</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="-312"/> <source>About LVGcoin card</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show information about LVGcoin card</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>&amp;Unlock Wallet...</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+297"/> <source>%n minute(s) ago</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n hour(s) ago</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n day(s) ago</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+6"/> <source>Up to date</source> <translation>Atualizado</translation> </message> <message> <location line="+7"/> <source>Catching up...</source> <translation>Recuperando...</translation> </message> <message> <location line="+10"/> <source>Last received block was generated %1.</source> <translation type="unfinished"/> </message> <message> <location line="+59"/> <source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. 
Do you want to pay the fee?</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Confirm transaction fee</source> <translation type="unfinished"/> </message> <message> <location line="+27"/> <source>Sent transaction</source> <translation>Transação enviada</translation> </message> <message> <location line="+1"/> <source>Incoming transaction</source> <translation>Transação recebida</translation> </message> <message> <location line="+1"/> <source>Date: %1 Amount: %2 Type: %3 Address: %4 </source> <translation>Data: %1 Quantia: %2 Tipo: %3 Endereço: %4 </translation> </message> <message> <location line="+100"/> <location line="+15"/> <source>URI handling</source> <translation type="unfinished"/> </message> <message> <location line="-15"/> <location line="+15"/> <source>URI can not be parsed! This can be caused by an invalid LVGcoin address or malformed URI parameters.</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation>A carteira está &lt;b&gt;encriptada&lt;/b&gt; e atualmente &lt;b&gt;desbloqueada&lt;/b&gt;</translation> </message> <message> <location line="+10"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation>A carteira está &lt;b&gt;encriptada&lt;/b&gt; e atualmente &lt;b&gt;bloqueada&lt;/b&gt;</translation> </message> <message> <location line="+25"/> <source>Backup Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Wallet Data (*.dat)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Backup Failed</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>There was an error trying to save the wallet data to the new location.</source> <translation type="unfinished"/> </message> <message 
numerus="yes"> <location line="+76"/> <source>%n second(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n minute(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n hour(s)</source> <translation><numerusform>%n hora</numerusform><numerusform>%n horas</numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n day(s)</source> <translation><numerusform>%n dia</numerusform><numerusform>%n dias</numerusform></translation> </message> <message> <location line="+18"/> <source>Not staking</source> <translation type="unfinished"/> </message> <message> <location filename="../bitcoin.cpp" line="+109"/> <source>A fatal error occurred. LVGcoin can no longer continue safely and will quit.</source> <translation type="unfinished"/> </message> </context> <context> <name>ClientModel</name> <message> <location filename="../clientmodel.cpp" line="+90"/> <source>Network Alert</source> <translation>Alerta da Rede</translation> </message> </context> <context> <name>CoinControlDialog</name> <message> <location filename="../forms/coincontroldialog.ui" line="+14"/> <source>Coin Control</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Quantity:</source> <translation>Quantidade:</translation> </message> <message> <location line="+32"/> <source>Bytes:</source> <translation>Bytes:</translation> </message> <message> <location line="+48"/> <source>Amount:</source> <translation>Quantia:</translation> </message> <message> <location line="+32"/> <source>Priority:</source> <translation>Prioridade:</translation> </message> <message> <location line="+48"/> <source>Fee:</source> <translation>Taxa:</translation> </message> <message> <location line="+35"/> <source>Low 
Output:</source> <translation>Saída Baixa:</translation> </message> <message> <location filename="../coincontroldialog.cpp" line="+551"/> <source>no</source> <translation>não</translation> </message> <message> <location filename="../forms/coincontroldialog.ui" line="+51"/> <source>After Fee:</source> <translation>Depois de taxas:</translation> </message> <message> <location line="+35"/> <source>Change:</source> <translation>Troco:</translation> </message> <message> <location line="+69"/> <source>(un)select all</source> <translation>(des)seleccionar todos</translation> </message> <message> <location line="+13"/> <source>Tree mode</source> <translation>Modo de árvore</translation> </message> <message> <location line="+16"/> <source>List mode</source> <translation>Modo lista</translation> </message> <message> <location line="+45"/> <source>Amount</source> <translation>Quantia</translation> </message> <message> <location line="+5"/> <source>Label</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Address</source> <translation>Endereço</translation> </message> <message> <location line="+5"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+5"/> <source>Confirmations</source> <translation>Confirmados</translation> </message> <message> <location line="+3"/> <source>Confirmed</source> <translation>Confirmada</translation> </message> <message> <location line="+5"/> <source>Priority</source> <translation>Prioridade</translation> </message> <message> <location filename="../coincontroldialog.cpp" line="-515"/> <source>Copy address</source> <translation>Copiar endereço</translation> </message> <message> <location line="+1"/> <source>Copy label</source> <translation>Copiar rótulo</translation> </message> <message> <location line="+1"/> <location line="+26"/> <source>Copy amount</source> <translation>Copiar quantia</translation> </message> <message> <location line="-25"/> <source>Copy transaction 
ID</source> <translation>Copiar ID da Transação</translation> </message> <message> <location line="+24"/> <source>Copy quantity</source> <translation>Copiar quantidade</translation> </message> <message> <location line="+2"/> <source>Copy fee</source> <translation>Copiar taxa</translation> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation>Copiar após taxa</translation> </message> <message> <location line="+1"/> <source>Copy bytes</source> <translation>Copiar bytes</translation> </message> <message> <location line="+1"/> <source>Copy priority</source> <translation>Copiar prioridade</translation> </message> <message> <location line="+1"/> <source>Copy low output</source> <translation>Copiar output baixo</translation> </message> <message> <location line="+1"/> <source>Copy change</source> <translation>Copiar troco</translation> </message> <message> <location line="+317"/> <source>highest</source> <translation>o maior</translation> </message> <message> <location line="+1"/> <source>high</source> <translation>alto</translation> </message> <message> <location line="+1"/> <source>medium-high</source> <translation>médio-alto</translation> </message> <message> <location line="+1"/> <source>medium</source> <translation>médio</translation> </message> <message> <location line="+4"/> <source>low-medium</source> <translation>baixo-médio</translation> </message> <message> <location line="+1"/> <source>low</source> <translation>baixo</translation> </message> <message> <location line="+1"/> <source>lowest</source> <translation>O mais baixo</translation> </message> <message> <location line="+155"/> <source>DUST</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>yes</source> <translation>sim</translation> </message> <message> <location line="+10"/> <source>This label turns red, if the transaction size is bigger than 10000 bytes. This means a fee of at least %1 per kb is required.
Can vary +/- 1 Byte per input.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Transactions with higher priority get more likely into a block. This label turns red, if the priority is smaller than &quot;medium&quot;. This means a fee of at least %1 per kb is required.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This label turns red, if any recipient receives an amount smaller than %1. This means a fee of at least %2 is required. Amounts below 0.546 times the minimum relay fee are shown as DUST.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This label turns red, if the change is smaller than %1. This means a fee of at least %2 is required.</source> <translation type="unfinished"/> </message> <message> <location line="+37"/> <location line="+66"/> <source>(no label)</source> <translation>(Sem rótulo)</translation> </message> <message> <location line="-9"/> <source>change from %1 (%2)</source> <translation>Alteração de %1 (%2)</translation> </message> <message> <location line="+1"/> <source>(change)</source> <translation>(Alteração)</translation> </message> </context> <context> <name>EditAddressDialog</name> <message> <location filename="../forms/editaddressdialog.ui" line="+14"/> <source>Edit Address</source> <translation>Editar Endereço</translation> </message> <message> <location line="+11"/> <source>&amp;Label</source> <translation>&amp;Rótulo</translation> </message> <message> <location line="+10"/> <source>The label associated with this address book entry</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>&amp;Address</source> <translation>E&amp;ndereço</translation> </message> <message> <location line="+10"/> <source>The address associated with this address book entry. 
This can only be modified for sending addresses.</source> <translation type="unfinished"/> </message> <message> <location filename="../editaddressdialog.cpp" line="+20"/> <source>New receiving address</source> <translation>Novo endereço de entrada</translation> </message> <message> <location line="+4"/> <source>New sending address</source> <translation>Novo endereço de saída</translation> </message> <message> <location line="+3"/> <source>Edit receiving address</source> <translation>Editar endereço de entrada</translation> </message> <message> <location line="+4"/> <source>Edit sending address</source> <translation>Editar endereço de saída</translation> </message> <message> <location line="+76"/> <source>The entered address &quot;%1&quot; is already in the address book.</source> <translation>O endereço introduzido &quot;%1&quot; já se encontra no livro de endereços.</translation> </message> <message> <location line="-5"/> <source>The entered address &quot;%1&quot; is not a valid LVGcoin address.</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Could not unlock wallet.</source> <translation>Impossível desbloquear carteira.</translation> </message> <message> <location line="+5"/> <source>New key generation failed.</source> <translation>Falha ao gerar nova chave.</translation> </message> </context> <context> <name>GUIUtil::HelpMessageBox</name> <message> <location filename="../guiutil.cpp" line="+420"/> <location line="+12"/> <source>lvgcoin-qt</source> <translation type="unfinished"/> </message> <message> <location line="-12"/> <source>version</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Usage:</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>command-line options</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>UI options</source> <translation type="unfinished"/> </message> <message> 
<location line="+1"/> <source>Set language, for example &quot;de_DE&quot; (default: system locale)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Start minimized</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show splash screen on startup (default: 1)</source> <translation type="unfinished"/> </message> </context> <context> <name>OptionsDialog</name> <message> <location filename="../forms/optionsdialog.ui" line="+14"/> <source>Options</source> <translation>Opções</translation> </message> <message> <location line="+16"/> <source>&amp;Main</source> <translation>&amp;Principal</translation> </message> <message> <location line="+6"/> <source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. Fee 0.01 recommended.</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Pay transaction &amp;fee</source> <translation>Pagar &amp;taxa de transação</translation> </message> <message> <location line="+31"/> <source>Reserved amount does not participate in staking and is therefore spendable at any time.</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Reserve</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Automatically start LVGcoin after logging in to the system.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Start LVGcoin on system login</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Detach block and address databases at shutdown. This means they can be moved to another data directory, but it slows down shutdown. 
The wallet is always detached.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Detach databases at shutdown</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>&amp;Network</source> <translation>&amp;Rede</translation> </message> <message> <location line="+6"/> <source>Automatically open the LVGcoin client port on the router. This only works when your router supports UPnP and it is enabled.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Map port using &amp;UPnP</source> <translation>Mapear porta usando &amp;UPnP</translation> </message> <message> <location line="+7"/> <source>Connect to the LVGcoin network through a SOCKS proxy (e.g. when connecting through Tor).</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Connect through SOCKS proxy:</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>Proxy &amp;IP:</source> <translation>&amp;IP do proxy:</translation> </message> <message> <location line="+19"/> <source>IP address of the proxy (e.g. 127.0.0.1)</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>&amp;Port:</source> <translation>&amp;Porta:</translation> </message> <message> <location line="+19"/> <source>Port of the proxy (e.g. 9050)</source> <translation>Porta do proxy (p.ex. 9050)</translation> </message> <message> <location line="+7"/> <source>SOCKS &amp;Version:</source> <translation>&amp;Versão SOCKS:</translation> </message> <message> <location line="+13"/> <source>SOCKS version of the proxy (e.g. 5)</source> <translation>Versão do proxy SOCKS (p.ex. 
5)</translation> </message> <message> <location line="+36"/> <source>&amp;Window</source> <translation>&amp;Janela</translation> </message> <message> <location line="+6"/> <source>Show only a tray icon after minimizing the window.</source> <translation>Apenas mostrar o ícone da bandeja após minimizar a janela.</translation> </message> <message> <location line="+3"/> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation>&amp;Minimizar para a bandeja e não para a barra de ferramentas</translation> </message> <message> <location line="+7"/> <source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source> <translation>Minimize ao invés de sair da aplicação quando a janela é fechada. Com esta opção selecionada, a aplicação apenas será encerrada quando escolher Sair da aplicação no menú.</translation> </message> <message> <location line="+3"/> <source>M&amp;inimize on close</source> <translation>M&amp;inimizar ao fechar</translation> </message> <message> <location line="+21"/> <source>&amp;Display</source> <translation>Vis&amp;ualização</translation> </message> <message> <location line="+8"/> <source>User Interface &amp;language:</source> <translation>&amp;Linguagem da interface de utilizador:</translation> </message> <message> <location line="+13"/> <source>The user interface language can be set here. 
This setting will take effect after restarting LVGcoin.</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>&amp;Unit to show amounts in:</source> <translation>&amp;Unidade a usar em quantias:</translation> </message> <message> <location line="+13"/> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation>Escolha a subdivisão unitária a ser mostrada por defeito na aplicação e ao enviar moedas.</translation> </message> <message> <location line="+9"/> <source>Whether to show LVGcoin addresses in the transaction list or not.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Display addresses in transaction list</source> <translation>Mostrar en&amp;dereços na lista de transações</translation> </message> <message> <location line="+7"/> <source>Whether to show coin control features or not.</source> <translation>Escolha para mostrar funcionalidades de controlo &quot;coin&quot; ou não.</translation> </message> <message> <location line="+3"/> <source>Display coin &amp;control features (experts only!)</source> <translation type="unfinished"/> </message> <message> <location line="+71"/> <source>&amp;OK</source> <translation>&amp;OK</translation> </message> <message> <location line="+7"/> <source>&amp;Cancel</source> <translation>&amp;Cancelar</translation> </message> <message> <location line="+10"/> <source>&amp;Apply</source> <translation type="unfinished"/> </message> <message> <location filename="../optionsdialog.cpp" line="+55"/> <source>default</source> <translation>padrão</translation> </message> <message> <location line="+149"/> <location line="+9"/> <source>Warning</source> <translation type="unfinished"/> </message> <message> <location line="-9"/> <location line="+9"/> <source>This setting will take effect after restarting LVGcoin.</source> <translation type="unfinished"/> </message> <message> <location line="+29"/> 
<source>The supplied proxy address is invalid.</source> <translation>O endereço de proxy introduzido é inválido. </translation> </message> </context> <context> <name>OverviewPage</name> <message> <location filename="../forms/overviewpage.ui" line="+14"/> <source>Form</source> <translation>Formulário</translation> </message> <message> <location line="+33"/> <location line="+231"/> <source>The displayed information may be out of date. Your wallet automatically synchronizes with the LVGcoin network after a connection is established, but this process has not completed yet.</source> <translation type="unfinished"/> </message> <message> <location line="-160"/> <source>Stake:</source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>Unconfirmed:</source> <translation type="unfinished"/> </message> <message> <location line="-107"/> <source>Wallet</source> <translation>Carteira</translation> </message> <message> <location line="+49"/> <source>Spendable:</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Your current spendable balance</source> <translation>O seu saldo disponível para gastar</translation> </message> <message> <location line="+71"/> <source>Immature:</source> <translation>Imaturo:</translation> </message> <message> <location line="+13"/> <source>Mined balance that has not yet matured</source> <translation>O saldo minado ainda não maturou</translation> </message> <message> <location line="+20"/> <source>Total:</source> <translation>Total:</translation> </message> <message> <location line="+16"/> <source>Your current total balance</source> <translation>O seu saldo total actual</translation> </message> <message> <location line="+46"/> <source>&lt;b&gt;Recent transactions&lt;/b&gt;</source> <translation>&lt;b&gt;Transações recentes&lt;/b&gt;</translation> </message> <message> <location line="-108"/> <source>Total of transactions that have yet to be confirmed, and do not yet count toward 
the current balance</source> <translation type="unfinished"/> </message> <message> <location line="-29"/> <source>Total of coins that was staked, and do not yet count toward the current balance</source> <translation type="unfinished"/> </message> <message> <location filename="../overviewpage.cpp" line="+113"/> <location line="+1"/> <source>out of sync</source> <translation>fora de sincronia</translation> </message> </context> <context> <name>QRCodeDialog</name> <message> <location filename="../forms/qrcodedialog.ui" line="+14"/> <source>QR Code Dialog</source> <translation type="unfinished"/> </message> <message> <location line="+59"/> <source>Request Payment</source> <translation type="unfinished"/> </message> <message> <location line="+56"/> <source>Amount:</source> <translation type="unfinished"/> </message> <message> <location line="-44"/> <source>Label:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>Message:</source> <translation type="unfinished"/> </message> <message> <location line="+71"/> <source>&amp;Save As...</source> <translation type="unfinished"/> </message> <message> <location filename="../qrcodedialog.cpp" line="+62"/> <source>Error encoding URI into QR Code.</source> <translation type="unfinished"/> </message> <message> <location line="+40"/> <source>The entered amount is invalid, please check.</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Resulting URI too long, try to reduce the text for label / message.</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Save QR Code</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>PNG Images (*.png)</source> <translation type="unfinished"/> </message> </context> <context> <name>RPCConsole</name> <message> <location filename="../forms/rpcconsole.ui" line="+46"/> <source>Client name</source> <translation>Nome do Cliente</translation> 
</message> <message> <location line="+10"/> <location line="+23"/> <location line="+26"/> <location line="+23"/> <location line="+23"/> <location line="+36"/> <location line="+53"/> <location line="+23"/> <location line="+23"/> <location filename="../rpcconsole.cpp" line="+348"/> <source>N/A</source> <translation>N/D</translation> </message> <message> <location line="-217"/> <source>Client version</source> <translation>Versão do Cliente</translation> </message> <message> <location line="-45"/> <source>&amp;Information</source> <translation>&amp;Informação</translation> </message> <message> <location line="+68"/> <source>Using OpenSSL version</source> <translation>Usando versão OpenSSL</translation> </message> <message> <location line="+49"/> <source>Startup time</source> <translation>Tempo de início</translation> </message> <message> <location line="+29"/> <source>Network</source> <translation>Rede</translation> </message> <message> <location line="+7"/> <source>Number of connections</source> <translation>Número de ligações</translation> </message> <message> <location line="+23"/> <source>On testnet</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Block chain</source> <translation>Cadeia de blocos</translation> </message> <message> <location line="+7"/> <source>Current number of blocks</source> <translation>Número actual de blocos</translation> </message> <message> <location line="+23"/> <source>Estimated total blocks</source> <translation>Total estimado de blocos</translation> </message> <message> <location line="+23"/> <source>Last block time</source> <translation>Tempo do último bloco</translation> </message> <message> <location line="+52"/> <source>&amp;Open</source> <translation>&amp;Abrir</translation> </message> <message> <location line="+16"/> <source>Command-line options</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Show the lvgcoin-qt help message to get a list 
with possible LVGcoin command-line options.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Show</source> <translation type="unfinished"/> </message> <message> <location line="+24"/> <source>&amp;Console</source> <translation>&amp;Consola</translation> </message> <message> <location line="-260"/> <source>Build date</source> <translation>Data de construção</translation> </message> <message> <location line="-104"/> <source>LVGcoin - Debug window</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>LVGcoin Core</source> <translation type="unfinished"/> </message> <message> <location line="+279"/> <source>Debug log file</source> <translation>Ficheiro de registo de depuração</translation> </message> <message> <location line="+7"/> <source>Open the LVGcoin debug log file from the current data directory. This can take a few seconds for large log files.</source> <translation type="unfinished"/> </message> <message> <location line="+102"/> <source>Clear console</source> <translation>Limpar consola</translation> </message> <message> <location filename="../rpcconsole.cpp" line="-33"/> <source>Welcome to the LVGcoin RPC console.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Use up and down arrows to navigate history, and &lt;b&gt;Ctrl-L&lt;/b&gt; to clear screen.</source> <translation>Use as setas para cima e para baixo para navegar no histórico e &lt;b&gt;Ctrl-L&lt;/b&gt; para limpar o ecrã.</translation> </message> <message> <location line="+1"/> <source>Type &lt;b&gt;help&lt;/b&gt; for an overview of available commands.</source> <translation>Digite &lt;b&gt;help&lt;/b&gt; para visualizar os comandos disponíveis.</translation> </message> </context> <context> <name>SendCoinsDialog</name> <message> <location filename="../forms/sendcoinsdialog.ui" line="+14"/> <location filename="../sendcoinsdialog.cpp" line="+182"/> <location line="+5"/> 
<location line="+5"/> <location line="+5"/> <location line="+6"/> <location line="+5"/> <location line="+5"/> <source>Send Coins</source> <translation>Enviar Moedas</translation> </message> <message> <location line="+76"/> <source>Coin Control Features</source> <translation>Funcionalidades de Coin Controlo:</translation> </message> <message> <location line="+20"/> <source>Inputs...</source> <translation>Entradas</translation> </message> <message> <location line="+7"/> <source>automatically selected</source> <translation>Selecção automática</translation> </message> <message> <location line="+19"/> <source>Insufficient funds!</source> <translation>Fundos insuficientes!</translation> </message> <message> <location line="+77"/> <source>Quantity:</source> <translation>Quantidade:</translation> </message> <message> <location line="+22"/> <location line="+35"/> <source>0</source> <translation type="unfinished"/> </message> <message> <location line="-19"/> <source>Bytes:</source> <translation>Bytes:</translation> </message> <message> <location line="+51"/> <source>Amount:</source> <translation>Quantia:</translation> </message> <message> <location line="+22"/> <location line="+86"/> <location line="+86"/> <location line="+32"/> <source>0.00 hack</source> <translation type="unfinished"/> </message> <message> <location line="-191"/> <source>Priority:</source> <translation>Prioridade:</translation> </message> <message> <location line="+19"/> <source>medium</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>Fee:</source> <translation>Taxa:</translation> </message> <message> <location line="+35"/> <source>Low Output:</source> <translation>Output Baixo:</translation> </message> <message> <location line="+19"/> <source>no</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>After Fee:</source> <translation>Depois de taxas:</translation> </message> <message> <location line="+35"/> 
<source>Change</source> <translation type="unfinished"/> </message> <message> <location line="+50"/> <source>custom change address</source> <translation type="unfinished"/> </message> <message> <location line="+106"/> <source>Send to multiple recipients at once</source> <translation>Enviar para múltiplos destinatários de uma vez</translation> </message> <message> <location line="+3"/> <source>Add &amp;Recipient</source> <translation>Adicionar &amp;Destinatário</translation> </message> <message> <location line="+20"/> <source>Remove all transaction fields</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Clear &amp;All</source> <translation>&amp;Limpar Tudo</translation> </message> <message> <location line="+28"/> <source>Balance:</source> <translation>Saldo:</translation> </message> <message> <location line="+16"/> <source>123.456 hack</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Confirm the send action</source> <translation>Confirme ação de envio</translation> </message> <message> <location line="+3"/> <source>S&amp;end</source> <translation>&amp;Enviar</translation> </message> <message> <location filename="../sendcoinsdialog.cpp" line="-173"/> <source>Enter a LVGcoin address (e.g. 
Ld9EDYP4c2M3rXuETZ9sSXbMsx8yAQzTm9)</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Copy quantity</source> <translation>Copiar quantidade</translation> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation>Copiar quantia</translation> </message> <message> <location line="+1"/> <source>Copy fee</source> <translation>Copiar taxa</translation> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation>Copiar depois de taxas</translation> </message> <message> <location line="+1"/> <source>Copy bytes</source> <translation>Copiar bytes</translation> </message> <message> <location line="+1"/> <source>Copy priority</source> <translation>Copiar prioridade</translation> </message> <message> <location line="+1"/> <source>Copy low output</source> <translation>Copiar output baixo</translation> </message> <message> <location line="+1"/> <source>Copy change</source> <translation>Copiar alteração</translation> </message> <message> <location line="+86"/> <source>&lt;b&gt;%1&lt;/b&gt; to %2 (%3)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Confirm send coins</source> <translation>Confirme envio de moedas</translation> </message> <message> <location line="+1"/> <source>Are you sure you want to send %1?</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source> and </source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>The recipient address is not valid, please recheck.</source> <translation>O endereço de destino não é válido, por favor verifique.</translation> </message> <message> <location line="+5"/> <source>The amount to pay must be larger than 0.</source> <translation>A quantia a pagar deverá ser maior que 0.</translation> </message> <message> <location line="+5"/> <source>The amount exceeds your balance.</source> <translation>A quantia excede o seu 
saldo.</translation> </message> <message> <location line="+5"/> <source>The total exceeds your balance when the %1 transaction fee is included.</source> <translation>O total excede o seu saldo quando a taxa de transação de %1 for incluída.</translation> </message> <message> <location line="+6"/> <source>Duplicate address found, can only send to each address once per send operation.</source> <translation>Endereço duplicado encontrado, apenas poderá enviar uma vez para cada endereço por cada operação de envio.</translation> </message> <message> <location line="+5"/> <source>Error: Transaction creation failed.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"/> </message> <message> <location line="+251"/> <source>WARNING: Invalid LVGcoin address</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>(no label)</source> <translation>(Sem rótulo)</translation> </message> <message> <location line="+4"/> <source>WARNING: unknown change address</source> <translation type="unfinished"/> </message> </context> <context> <name>SendCoinsEntry</name> <message> <location filename="../forms/sendcoinsentry.ui" line="+14"/> <source>Form</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>A&amp;mount:</source> <translation>Qu&amp;antia:</translation> </message> <message> <location line="+13"/> <source>Pay &amp;To:</source> <translation>&amp;Pagar A:</translation> </message> <message> <location line="+24"/> <location filename="../sendcoinsentry.cpp" line="+25"/> <source>Enter a label for this address to add it to your address book</source> <translation>Escreva um rótulo para este endereço para o adicionar ao seu 
livro de endereços</translation> </message> <message> <location line="+9"/> <source>&amp;Label:</source> <translation>Rótu&amp;lo:</translation> </message> <message> <location line="+18"/> <source>The address to send the payment to (e.g. Ld9EDYP4c2M3rXuETZ9sSXbMsx8yAQzTm9)</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Choose address from address book</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="+7"/> <source>Paste address from clipboard</source> <translation>Cole endereço da área de transferência</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+7"/> <source>Remove this recipient</source> <translation type="unfinished"/> </message> <message> <location filename="../sendcoinsentry.cpp" line="+1"/> <source>Enter a LVGcoin address (e.g. Ld9EDYP4c2M3rXuETZ9sSXbMsx8yAQzTm9)</source> <translation type="unfinished"/> </message> </context> <context> <name>SignVerifyMessageDialog</name> <message> <location filename="../forms/signverifymessagedialog.ui" line="+14"/> <source>Signatures - Sign / Verify a Message</source> <translation>Assinaturas - Assinar / Verificar uma Mensagem</translation> </message> <message> <location line="+13"/> <location line="+124"/> <source>&amp;Sign Message</source> <translation>A&amp;ssinar Mensagem</translation> </message> <message> <location line="-118"/> <source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source> <translation>Pode assinar mensagens com os seus endereços para provar que são seus. 
Tenha atenção ao assinar mensagens ambíguas, pois ataques de phishing podem tentar enganá-lo, de modo a assinar a sua identidade para os atacantes. Apenas assine declarações completamente detalhadas com as quais concorde.</translation> </message> <message> <location line="+18"/> <source>The address to sign the message with (e.g. Ld9EDYP4c2M3rXuETZ9sSXbMsx8yAQzTm9)</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <location line="+203"/> <source>Choose an address from the address book</source> <translation type="unfinished"/> </message> <message> <location line="-193"/> <location line="+203"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="-193"/> <source>Paste address from clipboard</source> <translation>Cole endereço da área de transferência</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+12"/> <source>Enter the message you want to sign here</source> <translation>Escreva aqui a mensagem que deseja assinar</translation> </message> <message> <location line="+24"/> <source>Copy the current signature to the system clipboard</source> <translation>Copiar a assinatura actual para a área de transferência</translation> </message> <message> <location line="+21"/> <source>Sign the message to prove you own this LVGcoin address</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Reset all sign message fields</source> <translation>Repôr todos os campos de assinatura de mensagem</translation> </message> <message> <location line="+3"/> <location line="+146"/> <source>Clear &amp;All</source> <translation>Limpar &amp;Tudo</translation> </message> <message> <location line="-87"/> <location line="+70"/> <source>&amp;Verify Message</source> <translation>&amp;Verificar Mensagem</translation> </message> <message> <location line="-64"/> <source>Enter the signing 
address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source> <translation>Introduza o endereço de assinatura, mensagem (assegure-se de copiar quebras de linha, espaços, tabuladores, etc. exactamente) e assinatura abaixo para verificar a mensagem. Tenha atenção para não ler mais na assinatura do que o que estiver na mensagem assinada, para evitar ser enganado por um atacante que se encontre entre si e quem assinou a mensagem.</translation> </message> <message> <location line="+21"/> <source>The address the message was signed with (e.g. Ld9EDYP4c2M3rXuETZ9sSXbMsx8yAQzTm9)</source> <translation type="unfinished"/> </message> <message> <location line="+40"/> <source>Verify the message to ensure it was signed with the specified LVGcoin address</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Reset all verify message fields</source> <translation>Repôr todos os campos de verificação de mensagem</translation> </message> <message> <location filename="../signverifymessagedialog.cpp" line="+27"/> <location line="+3"/> <source>Enter a LVGcoin address (e.g. Ld9EDYP4c2M3rXuETZ9sSXbMsx8yAQzTm9)</source> <translation type="unfinished"/> </message> <message> <location line="-2"/> <source>Click &quot;Sign Message&quot; to generate signature</source> <translation>Clique &quot;Assinar mensagem&quot; para gerar a assinatura</translation> </message> <message> <location line="+3"/> <source>Enter LVGcoin signature</source> <translation type="unfinished"/> </message> <message> <location line="+82"/> <location line="+81"/> <source>The entered address is invalid.</source> <translation>O endereço introduzido é inválido. 
</translation> </message> <message> <location line="-81"/> <location line="+8"/> <location line="+73"/> <location line="+8"/> <source>Please check the address and try again.</source> <translation>Por favor verifique o endereço e tente de novo.</translation> </message> <message> <location line="-81"/> <location line="+81"/> <source>The entered address does not refer to a key.</source> <translation>O endereço introduzido não refere a chave alguma.</translation> </message> <message> <location line="-73"/> <source>Wallet unlock was cancelled.</source> <translation>O desbloqueio da carteira foi cancelado.</translation> </message> <message> <location line="+8"/> <source>Private key for the entered address is not available.</source> <translation>A chave privada para o endereço introduzido não está disponível.</translation> </message> <message> <location line="+12"/> <source>Message signing failed.</source> <translation>Assinatura de mensagem falhou.</translation> </message> <message> <location line="+5"/> <source>Message signed.</source> <translation>Mensagem assinada.</translation> </message> <message> <location line="+59"/> <source>The signature could not be decoded.</source> <translation>A assinatura não pôde ser descodificada.</translation> </message> <message> <location line="+0"/> <location line="+13"/> <source>Please check the signature and try again.</source> <translation>Por favor verifique a assinatura e tente de novo.</translation> </message> <message> <location line="+0"/> <source>The signature did not match the message digest.</source> <translation>A assinatura não condiz com o conteúdo da mensagem.</translation> </message> <message> <location line="+7"/> <source>Message verification failed.</source> <translation>Verificação da mensagem falhou.</translation> </message> <message> <location line="+5"/> <source>Message verified.</source> <translation>Mensagem verificada.</translation> </message> </context> <context> <name>TransactionDesc</name> <message> 
<location filename="../transactiondesc.cpp" line="+19"/> <source>Open until %1</source> <translation>Aberto até %1</translation> </message> <message numerus="yes"> <location line="-2"/> <source>Open for %n block(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+8"/> <source>conflicted</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1/offline</source> <translation>%1/desligado</translation> </message> <message> <location line="+2"/> <source>%1/unconfirmed</source> <translation>%1/não confirmada</translation> </message> <message> <location line="+2"/> <source>%1 confirmations</source> <translation>%1 confirmações</translation> </message> <message> <location line="+18"/> <source>Status</source> <translation>Estado</translation> </message> <message numerus="yes"> <location line="+7"/> <source>, broadcast through %n node(s)</source> <translation><numerusform>, transmitida através de %n nó</numerusform><numerusform>, transmitida através de %n nós</numerusform></translation> </message> <message> <location line="+4"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+7"/> <source>Source</source> <translation>Origem</translation> </message> <message> <location line="+0"/> <source>Generated</source> <translation>Gerado</translation> </message> <message> <location line="+5"/> <location line="+17"/> <source>From</source> <translation>De</translation> </message> <message> <location line="+1"/> <location line="+22"/> <location line="+58"/> <source>To</source> <translation>Para</translation> </message> <message> <location line="-77"/> <location line="+2"/> <source>own address</source> <translation>endereço próprio</translation> </message> <message> <location line="-2"/> <source>label</source> <translation>rótulo</translation> </message> <message> <location line="+37"/> <location line="+12"/> 
<location line="+45"/> <location line="+17"/> <location line="+30"/> <source>Credit</source> <translation>Crédito</translation> </message> <message numerus="yes"> <location line="-102"/> <source>matures in %n more block(s)</source> <translation><numerusform>matura daqui por %n bloco</numerusform><numerusform>matura daqui por %n blocos</numerusform></translation> </message> <message> <location line="+2"/> <source>not accepted</source> <translation>não aceite</translation> </message> <message> <location line="+44"/> <location line="+8"/> <location line="+15"/> <location line="+30"/> <source>Debit</source> <translation>Débito</translation> </message> <message> <location line="-39"/> <source>Transaction fee</source> <translation>Taxa de transação</translation> </message> <message> <location line="+16"/> <source>Net amount</source> <translation>Valor líquido</translation> </message> <message> <location line="+6"/> <source>Message</source> <translation>Mensagem</translation> </message> <message> <location line="+2"/> <source>Comment</source> <translation>Comentário</translation> </message> <message> <location line="+2"/> <source>Transaction ID</source> <translation>ID da Transação</translation> </message> <message> <location line="+3"/> <source>Generated coins must mature 510 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to &quot;not accepted&quot; and it won&apos;t be spendable. 
This may occasionally happen if another node generates a block within a few seconds of yours.</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Debug information</source> <translation>Informação de depuração</translation> </message> <message> <location line="+8"/> <source>Transaction</source> <translation>Transação</translation> </message> <message> <location line="+5"/> <source>Inputs</source> <translation>Entradas</translation> </message> <message> <location line="+23"/> <source>Amount</source> <translation>Quantia</translation> </message> <message> <location line="+1"/> <source>true</source> <translation>verdadeiro</translation> </message> <message> <location line="+0"/> <source>false</source> <translation>falso</translation> </message> <message> <location line="-211"/> <source>, has not been successfully broadcast yet</source> <translation>, ainda não foi transmitida com sucesso</translation> </message> <message> <location line="+35"/> <source>unknown</source> <translation>desconhecido</translation> </message> </context> <context> <name>TransactionDescDialog</name> <message> <location filename="../forms/transactiondescdialog.ui" line="+14"/> <source>Transaction details</source> <translation>Detalhes da transação</translation> </message> <message> <location line="+6"/> <source>This pane shows a detailed description of the transaction</source> <translation>Esta janela mostra uma descrição detalhada da transação</translation> </message> </context> <context> <name>TransactionTableModel</name> <message> <location filename="../transactiontablemodel.cpp" line="+226"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+0"/> <source>Type</source> <translation>Tipo</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>Endereço</translation> </message> <message> <location line="+0"/> <source>Amount</source> <translation>Quantia</translation> 
</message> <message> <location line="+60"/> <source>Open until %1</source> <translation>Aberto até %1</translation> </message> <message> <location line="+12"/> <source>Confirmed (%1 confirmations)</source> <translation>Confirmada (%1 confirmações)</translation> </message> <message numerus="yes"> <location line="-15"/> <source>Open for %n more block(s)</source> <translation><numerusform>Aberta por mais %n bloco</numerusform><numerusform>Aberta por mais %n blocos</numerusform></translation> </message> <message> <location line="+6"/> <source>Offline</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Unconfirmed</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Confirming (%1 of %2 recommended confirmations)</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Conflicted</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Immature (%1 confirmations, will be available after %2)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>This block was not received by any other nodes and will probably not be accepted!</source> <translation>Este bloco não foi recebido por outros nós e provavelmente não será aceite pela rede!</translation> </message> <message> <location line="+3"/> <source>Generated but not accepted</source> <translation>Gerado mas não aceite</translation> </message> <message> <location line="+42"/> <source>Received with</source> <translation>Recebido com</translation> </message> <message> <location line="+2"/> <source>Received from</source> <translation>Recebido de</translation> </message> <message> <location line="+3"/> <source>Sent to</source> <translation>Enviado para</translation> </message> <message> <location line="+2"/> <source>Payment to yourself</source> <translation>Pagamento ao próprio</translation> </message> <message> <location line="+2"/> 
<source>Mined</source> <translation>Minadas</translation> </message> <message> <location line="+38"/> <source>(n/a)</source> <translation>(n/d)</translation> </message> <message> <location line="+190"/> <source>Transaction status. Hover over this field to show number of confirmations.</source> <translation>Estado da transação. Pairar por cima deste campo para mostrar o número de confirmações.</translation> </message> <message> <location line="+2"/> <source>Date and time that the transaction was received.</source> <translation>Data e hora a que esta transação foi recebida.</translation> </message> <message> <location line="+2"/> <source>Type of transaction.</source> <translation>Tipo de transação.</translation> </message> <message> <location line="+2"/> <source>Destination address of transaction.</source> <translation>Endereço de destino da transação.</translation> </message> <message> <location line="+2"/> <source>Amount removed from or added to balance.</source> <translation>Quantia retirada ou adicionada ao saldo.</translation> </message> </context> <context> <name>TransactionView</name> <message> <location filename="../transactionview.cpp" line="+55"/> <location line="+16"/> <source>All</source> <translation>Todas</translation> </message> <message> <location line="-15"/> <source>Today</source> <translation>Hoje</translation> </message> <message> <location line="+1"/> <source>This week</source> <translation>Esta semana</translation> </message> <message> <location line="+1"/> <source>This month</source> <translation>Este mês</translation> </message> <message> <location line="+1"/> <source>Last month</source> <translation>Mês passado</translation> </message> <message> <location line="+1"/> <source>This year</source> <translation>Este ano</translation> </message> <message> <location line="+1"/> <source>Range...</source> <translation>Período...</translation> </message> <message> <location line="+11"/> <source>Received with</source> <translation>Recebida 
com</translation> </message> <message> <location line="+2"/> <source>Sent to</source> <translation>Enviada para</translation> </message> <message> <location line="+2"/> <source>To yourself</source> <translation>Para si</translation> </message> <message> <location line="+1"/> <source>Mined</source> <translation>Minadas</translation> </message> <message> <location line="+1"/> <source>Other</source> <translation>Outras</translation> </message> <message> <location line="+7"/> <source>Enter address or label to search</source> <translation>Escreva endereço ou rótulo a procurar</translation> </message> <message> <location line="+7"/> <source>Min amount</source> <translation>Quantia mínima</translation> </message> <message> <location line="+34"/> <source>Copy address</source> <translation>Copiar endereço</translation> </message> <message> <location line="+1"/> <source>Copy label</source> <translation>Copiar rótulo</translation> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation>Copiar quantia</translation> </message> <message> <location line="+1"/> <source>Copy transaction ID</source> <translation>Copiar ID da Transação</translation> </message> <message> <location line="+1"/> <source>Edit label</source> <translation>Editar rótulo</translation> </message> <message> <location line="+1"/> <source>Show transaction details</source> <translation>Mostrar detalhes da transação</translation> </message> <message> <location line="+144"/> <source>Export Transaction Data</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>Ficheiro separado por vírgula (*.csv)</translation> </message> <message> <location line="+8"/> <source>Confirmed</source> <translation>Confirmada</translation> </message> <message> <location line="+1"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+1"/> <source>Type</source> 
<translation>Tipo</translation> </message> <message> <location line="+1"/> <source>Label</source> <translation>Rótulo</translation> </message> <message> <location line="+1"/> <source>Address</source> <translation>Endereço</translation> </message> <message> <location line="+1"/> <source>Amount</source> <translation>Quantia</translation> </message> <message> <location line="+1"/> <source>ID</source> <translation>ID</translation> </message> <message> <location line="+4"/> <source>Error exporting</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation type="unfinished"/> </message> <message> <location line="+100"/> <source>Range:</source> <translation>Período:</translation> </message> <message> <location line="+8"/> <source>to</source> <translation>até</translation> </message> </context> <context> <name>WalletModel</name> <message> <location filename="../walletmodel.cpp" line="+206"/> <source>Sending...</source> <translation type="unfinished"/> </message> </context> <context> <name>bitcoin-core</name> <message> <location filename="../bitcoinstrings.cpp" line="+33"/> <source>LVGcoin version</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Usage:</source> <translation>Utilização:</translation> </message> <message> <location line="+1"/> <source>Send command to -server or lvgcoind</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>List commands</source> <translation>Listar comandos</translation> </message> <message> <location line="+1"/> <source>Get help for a command</source> <translation>Obter ajuda para um comando</translation> </message> <message> <location line="+2"/> <source>Options:</source> <translation>Opções:</translation> </message> <message> <location line="+2"/> <source>Specify configuration file (default: LVGcoin.conf)</source> <translation type="unfinished"/> </message> <message> 
<location line="+1"/> <source>Specify pid file (default: lvgcoind.pid)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Specify wallet file (within data directory)</source> <translation>Especifique ficheiro de carteira (dentro da pasta de dados)</translation> </message> <message> <location line="-1"/> <source>Specify data directory</source> <translation>Especificar pasta de dados</translation> </message> <message> <location line="+2"/> <source>Set database cache size in megabytes (default: 25)</source> <translation>Definir o tamanho da cache de base de dados em megabytes (por defeito: 25)</translation> </message> <message> <location line="+1"/> <source>Set database disk log size in megabytes (default: 100)</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Listen for connections on &lt;port&gt; (default: 15714 or testnet: 25714)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Maintain at most &lt;n&gt; connections to peers (default: 125)</source> <translation>Manter no máximo &lt;n&gt; ligações a outros nós da rede (por defeito: 125)</translation> </message> <message> <location line="+3"/> <source>Connect to a node to retrieve peer addresses, and disconnect</source> <translation>Ligar a um nó para recuperar endereços de pares, e desligar</translation> </message> <message> <location line="+1"/> <source>Specify your own public address</source> <translation>Especifique o seu endereço público</translation> </message> <message> <location line="+5"/> <source>Bind to given address. 
Use [host]:port notation for IPv6</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Stake your coins to support network and gain reward (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Threshold for disconnecting misbehaving peers (default: 100)</source> <translation>Tolerância para desligar nós mal-formados (por defeito: 100)</translation> </message> <message> <location line="+1"/> <source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source> <translation>Número de segundos a impedir que nós mal-formados se liguem de novo (por defeito: 86400)</translation> </message> <message> <location line="-44"/> <source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source> <translation>Ocorreu um erro ao definir a porta %u do serviço RPC a escutar em IPv4: %s</translation> </message> <message> <location line="+51"/> <source>Detach block and address databases. Increases shutdown time (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+109"/> <source>Error: The transaction was rejected. 
This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"/> </message> <message> <location line="-5"/> <source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds </source> <translation type="unfinished"/> </message> <message> <location line="-87"/> <source>Listen for JSON-RPC connections on &lt;port&gt; (default: 15715 or testnet: 25715)</source> <translation type="unfinished"/> </message> <message> <location line="-11"/> <source>Accept command line and JSON-RPC commands</source> <translation>Aceitar comandos da consola e JSON-RPC</translation> </message> <message> <location line="+101"/> <source>Error: Transaction creation failed </source> <translation type="unfinished"/> </message> <message> <location line="-5"/> <source>Error: Wallet locked, unable to create transaction </source> <translation type="unfinished"/> </message> <message> <location line="-8"/> <source>Importing blockchain data file.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Importing bootstrap blockchain data file.</source> <translation type="unfinished"/> </message> <message> <location line="-88"/> <source>Run in the background as a daemon and accept commands</source> <translation>Correr o processo como um daemon e aceitar comandos</translation> </message> <message> <location line="+1"/> <source>Use the test network</source> <translation>Utilizar a rede de testes - testnet</translation> </message> <message> <location line="-24"/> <source>Accept connections from outside (default: 1 if no -proxy or -connect)</source> <translation>Aceitar ligações externas (padrão: 1 sem -proxy ou -connect)</translation> </message> <message> <location line="-38"/> <source>An error occurred while setting up the RPC port %u for 
listening on IPv6, falling back to IPv4: %s</source> <translation>Ocorreu um erro ao definir a porta %u do serviço RPC a escutar em IPv6, a usar IPv4: %s</translation> </message> <message> <location line="+117"/> <source>Error initializing database environment %s! To recover, BACKUP THAT DIRECTORY, then remove everything from it except for wallet.dat.</source> <translation type="unfinished"/> </message> <message> <location line="-20"/> <source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source> <translation>Atenção: -paytxfee está definida com um valor muito alto! Esta é a taxa que irá pagar se enviar uma transação.</translation> </message> <message> <location line="+61"/> <source>Warning: Please check that your computer&apos;s date and time are correct! If your clock is wrong LVGcoin will not work properly.</source> <translation type="unfinished"/> </message> <message> <location line="-31"/> <source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source> <translation>Atenção: erro ao ler wallet.dat! Todas as chaves foram lidas correctamente, mas dados de transação ou do livro de endereços podem estar em falta ou incorrectos.</translation> </message> <message> <location line="-18"/> <source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source> <translation>Atenção: wallet.dat corrupto, dados recuperados! 
wallet.dat original salvo como wallet.{timestamp}.bak em %s; se o seu saldo ou transações estiverem incorrectos deverá recuperar de uma cópia de segurança.</translation> </message> <message> <location line="-30"/> <source>Attempt to recover private keys from a corrupt wallet.dat</source> <translation>Tentar recuperar chaves privadas de um wallet.dat corrupto</translation> </message> <message> <location line="+4"/> <source>Block creation options:</source> <translation>Opções de criação de bloco:</translation> </message> <message> <location line="-62"/> <source>Connect only to the specified node(s)</source> <translation>Apenas ligar ao(s) nó(s) especificado(s)</translation> </message> <message> <location line="+4"/> <source>Discover own IP address (default: 1 when listening and no -externalip)</source> <translation>Descobrir endereço IP próprio (padrão: 1 ao escutar e sem -externalip)</translation> </message> <message> <location line="+94"/> <source>Failed to listen on any port. Use -listen=0 if you want this.</source> <translation>Falhou a escutar em qualquer porta. 
Use -listen=0 se quer isto.</translation> </message> <message> <location line="-90"/> <source>Find peers using DNS lookup (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Sync checkpoints policy (default: strict)</source> <translation type="unfinished"/> </message> <message> <location line="+83"/> <source>Invalid -tor address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Invalid amount for -reservebalance=&lt;amount&gt;</source> <translation type="unfinished"/> </message> <message> <location line="-82"/> <source>Maximum per-connection receive buffer, &lt;n&gt;*1000 bytes (default: 5000)</source> <translation>Armazenamento intermédio de recepção por ligação, &lt;n&gt;*1000 bytes (por defeito: 5000)</translation> </message> <message> <location line="+1"/> <source>Maximum per-connection send buffer, &lt;n&gt;*1000 bytes (default: 1000)</source> <translation>Armazenamento intermédio de envio por ligação, &lt;n&gt;*1000 bytes (por defeito: 1000)</translation> </message> <message> <location line="-16"/> <source>Only connect to nodes in network &lt;net&gt; (IPv4, IPv6 or Tor)</source> <translation>Apenas ligar a nós na rede &lt;net&gt; (IPv4, IPv6 ou Tor)</translation> </message> <message> <location line="+28"/> <source>Output extra debugging information. 
Implies all other -debug* options</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Output extra network debugging information</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Prepend debug output with timestamp</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source> <translation>Opções SSL: (ver a Wiki Bitcoin para instruções de configuração SSL)</translation> </message> <message> <location line="-74"/> <source>Select the version of socks proxy to use (4-5, default: 5)</source> <translation type="unfinished"/> </message> <message> <location line="+41"/> <source>Send trace/debug info to console instead of debug.log file</source> <translation>Enviar informação de rastreio/depuração para a consola e não para o ficheiro debug.log</translation> </message> <message> <location line="+1"/> <source>Send trace/debug info to debugger</source> <translation type="unfinished"/> </message> <message> <location line="+28"/> <source>Set maximum block size in bytes (default: 250000)</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Set minimum block size in bytes (default: 0)</source> <translation>Definir tamanho minímo de um bloco em bytes (por defeito: 0)</translation> </message> <message> <location line="-29"/> <source>Shrink debug.log file on client startup (default: 1 when no -debug)</source> <translation>Encolher ficheiro debug.log ao iniciar o cliente (por defeito: 1 sem -debug definido)</translation> </message> <message> <location line="-42"/> <source>Specify connection timeout in milliseconds (default: 5000)</source> <translation>Especificar tempo de espera da ligação em millisegundos (por defeito: 5000)</translation> </message> <message> <location line="+109"/> <source>Unable to sign checkpoint, wrong checkpointkey? 
</source> <translation type="unfinished"/> </message> <message> <location line="-80"/> <source>Use UPnP to map the listening port (default: 0)</source> <translation>Usar UPnP para mapear a porta de escuta (padrão: 0)</translation> </message> <message> <location line="-1"/> <source>Use UPnP to map the listening port (default: 1 when listening)</source> <translation>Usar UPnP para mapear a porta de escuta (padrão: 1 ao escutar)</translation> </message> <message> <location line="-25"/> <source>Use proxy to reach tor hidden services (default: same as -proxy)</source> <translation type="unfinished"/> </message> <message> <location line="+42"/> <source>Username for JSON-RPC connections</source> <translation>Nome de utilizador para ligações JSON-RPC</translation> </message> <message> <location line="+47"/> <source>Verifying database integrity...</source> <translation type="unfinished"/> </message> <message> <location line="+57"/> <source>WARNING: syncronized checkpoint violation detected, but skipped!</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Warning: Disk space is low!</source> <translation type="unfinished"/> </message> <message> <location line="-2"/> <source>Warning: This version is obsolete, upgrade required!</source> <translation>Atenção: Esta versão está obsoleta, é necessário actualizar!</translation> </message> <message> <location line="-48"/> <source>wallet.dat corrupt, salvage failed</source> <translation>wallet.dat corrupta, recuperação falhou</translation> </message> <message> <location line="-54"/> <source>Password for JSON-RPC connections</source> <translation>Palavra-passe para ligações JSON-RPC</translation> </message> <message> <location line="-84"/> <source>%s, you must set a rpcpassword in the configuration file: %s It is recommended you use the following random password: rpcuser=LVGcoinrpc rpcpassword=%s (you do not need to remember this password) The username and password MUST NOT be the same. 
If the file does not exist, create it with owner-readable-only file permissions. It is also recommended to set alertnotify so you are notified of problems; for example: alertnotify=echo %%s | mail -s &quot;LVGcoin Alert&quot; [email protected] </source> <translation type="unfinished"/> </message> <message> <location line="+51"/> <source>Find peers using internet relay chat (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Sync time with other nodes. Disable if time on your system is precise e.g. syncing with NTP (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Allow JSON-RPC connections from specified IP address</source> <translation>Permitir ligações JSON-RPC do endereço IP especificado</translation> </message> <message> <location line="+1"/> <source>Send commands to node running on &lt;ip&gt; (default: 127.0.0.1)</source> <translation>Enviar comandos para o nó a correr em &lt;ip&gt; (por defeito: 127.0.0.1)</translation> </message> <message> <location line="+1"/> <source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source> <translation>Executar comando quando mudar o melhor bloco (no comando, %s é substituído pela hash do bloco)</translation> </message> <message> <location line="+3"/> <source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source> <translation>Executar comando quando uma das transações na carteira mudar (no comando, %s é substituído pelo ID da Transação)</translation> </message> <message> <location line="+3"/> <source>Require a confirmations for change (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Enforce transaction scripts to 
use canonical PUSH operators (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Upgrade wallet to latest format</source> <translation>Atualize a carteira para o formato mais recente</translation> </message> <message> <location line="+1"/> <source>Set key pool size to &lt;n&gt; (default: 100)</source> <translation>Definir o tamanho da memória de chaves para &lt;n&gt; (por defeito: 100)</translation> </message> <message> <location line="+1"/> <source>Rescan the block chain for missing wallet transactions</source> <translation>Reexaminar a cadeia de blocos para transações em falta na carteira</translation> </message> <message> <location line="+2"/> <source>How many blocks to check at startup (default: 2500, 0 = all)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>How thorough the block verification is (0-6, default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Imports blocks from external blk000?.dat file</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Use OpenSSL (https) for JSON-RPC connections</source> <translation>Usar OpenSSL (https) para ligações JSON-RPC</translation> </message> <message> <location line="+1"/> <source>Server certificate file (default: server.cert)</source> <translation>Ficheiro de certificado do servidor (por defeito: server.cert)</translation> </message> <message> <location line="+1"/> <source>Server private key (default: server.pem)</source> <translation>Chave privada do servidor (por defeito: server.pem)</translation> </message> <message> <location line="+1"/> <source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source> <translation 
type="unfinished"/> </message> <message> <location line="+53"/> <source>Error: Wallet unlocked for staking only, unable to create transaction.</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>WARNING: Invalid checkpoint found! Displayed transactions may not be correct! You may need to upgrade, or notify developers.</source> <translation type="unfinished"/> </message> <message> <location line="-158"/> <source>This help message</source> <translation>Esta mensagem de ajuda</translation> </message> <message> <location line="+95"/> <source>Wallet %s resides outside data directory %s.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Cannot obtain a lock on data directory %s. LVGcoin is probably already running.</source> <translation type="unfinished"/> </message> <message> <location line="-98"/> <source>LVGcoin</source> <translation type="unfinished"/> </message> <message> <location line="+140"/> <source>Unable to bind to %s on this computer (bind returned error %d, %s)</source> <translation>Incapaz de vincular a %s neste computador (vínculo retornou erro %d, %s)</translation> </message> <message> <location line="-130"/> <source>Connect through socks proxy</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Allow DNS lookups for -addnode, -seednode and -connect</source> <translation>Permitir procuras DNS para -addnode, -seednode e -connect</translation> </message> <message> <location line="+122"/> <source>Loading addresses...</source> <translation>Carregar endereços...</translation> </message> <message> <location line="-15"/> <source>Error loading blkindex.dat</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Error loading wallet.dat: Wallet corrupted</source> <translation>Erro ao carregar wallet.dat: Carteira danificada</translation> </message> <message> <location line="+4"/> <source>Error loading 
wallet.dat: Wallet requires newer version of LVGcoin</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Wallet needed to be rewritten: restart LVGcoin to complete</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Error loading wallet.dat</source> <translation>Erro ao carregar wallet.dat</translation> </message> <message> <location line="-16"/> <source>Invalid -proxy address: &apos;%s&apos;</source> <translation>Endereço -proxy inválido: &apos;%s&apos;</translation> </message> <message> <location line="-1"/> <source>Unknown network specified in -onlynet: &apos;%s&apos;</source> <translation>Rede desconhecida especificada em -onlynet: &apos;%s&apos;</translation> </message> <message> <location line="-1"/> <source>Unknown -socks proxy version requested: %i</source> <translation>Versão desconhecida de proxy -socks requisitada: %i</translation> </message> <message> <location line="+4"/> <source>Cannot resolve -bind address: &apos;%s&apos;</source> <translation>Não conseguiu resolver endereço -bind: &apos;%s&apos;</translation> </message> <message> <location line="+2"/> <source>Cannot resolve -externalip address: &apos;%s&apos;</source> <translation>Não conseguiu resolver endereço -externalip: &apos;%s&apos;</translation> </message> <message> <location line="-24"/> <source>Invalid amount for -paytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation>Quantia inválida para -paytxfee=&lt;amount&gt;: &apos;%s&apos;</translation> </message> <message> <location line="+44"/> <source>Error: could not start node</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Sending...</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Invalid amount</source> <translation>Quantia inválida</translation> </message> <message> <location line="+1"/> <source>Insufficient funds</source> <translation>Fundos 
insuficientes</translation> </message> <message> <location line="-34"/> <source>Loading block index...</source> <translation>Carregar índice de blocos...</translation> </message> <message> <location line="-103"/><|fim▁hole|> <location line="+122"/> <source>Unable to bind to %s on this computer. LVGcoin is probably already running.</source> <translation type="unfinished"/> </message> <message> <location line="-97"/> <source>Fee per KB to add to transactions you send</source> <translation type="unfinished"/> </message> <message> <location line="+55"/> <source>Invalid amount for -mininput=&lt;amount&gt;: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Loading wallet...</source> <translation>Carregar carteira...</translation> </message> <message> <location line="+8"/> <source>Cannot downgrade wallet</source> <translation>Impossível mudar a carteira para uma versão anterior</translation> </message> <message> <location line="+1"/> <source>Cannot initialize keypool</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Cannot write default address</source> <translation>Impossível escrever endereço por defeito</translation> </message> <message> <location line="+1"/> <source>Rescanning...</source> <translation>Reexaminando...</translation> </message> <message> <location line="+5"/> <source>Done loading</source> <translation>Carregamento completo</translation> </message> <message> <location line="-167"/> <source>To use the %s option</source> <translation>Para usar a opção %s</translation> </message> <message> <location line="+14"/> <source>Error</source> <translation>Erro</translation> </message> <message> <location line="+6"/> <source>You must set rpcpassword=&lt;password&gt; in the configuration file: %s If the file does not exist, create it with owner-readable-only file permissions.</source> <translation>Deverá definir rpcpassword=&lt;password&gt; no ficheiro de 
configuração: %s Se o ficheiro não existir, crie-o com permissões de leitura apenas para o dono.</translation> </message> </context> </TS><|fim▁end|>
<source>Add a node to connect to and attempt to keep the connection open</source> <translation>Adicione um nó ao qual se ligar e tentar manter a ligação aberta</translation> </message> <message>
<|file_name|>CTMBuilderOptions.java<|end_file_name|><|fim▁begin|>/* * Copyright 2013 Maksim Kisilyov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.luxoft.p4ctm; import java.io.*; import java.util.Properties; public class CTMBuilderOptions { private String server; private String user; private String password; private String fileSpec; private String requirementIdRegex; private String typeOfChangeRegex; private String acceptedTypesOfChangeRegex; private String clDescriptionsOverridePath; private String requirementsInputFile; private String requirementsInputRegex; private String requirementsFilterFile; private String matrixOutputFile; public CTMBuilderOptions() { this.server = "perforce:1666"; this.user = "anonymous"; this.password = ""; this.fileSpec = "//depot/..."; this.requirementIdRegex = "(?m)^\\[[xX]\\] TTS/Ref : (.*?)\\s*?$"; this.typeOfChangeRegex = "(?m)^\\[[xX]\\] (?:BF|FE|OP|QI|IN|OT) = (.*?)\\s*?$"; this.acceptedTypesOfChangeRegex = "BugFix|Feature"; this.clDescriptionsOverridePath = null; this.requirementsInputFile = null; this.requirementsInputRegex = "(?ms)CTRS ID: (?<rid>.*?)$.*?Object Heading: (?<name>.*?)$.*?Object Text: (?<desc>.*?)$"; this.requirementsFilterFile = null;<|fim▁hole|> public CTMBuilderOptions(String server, String user, String password, String fileSpec, String requirementIdSearchPattern, String typeOfChangeRegex, String acceptedTypesOfChangeRegex, String CLDescriptionsInputPath, String requirementsInputFile, String 
requirementsInputRegex, String requirementsFilterFile, String matrixOutputFile) { this.server = server; this.user = user; this.password = password; this.fileSpec = fileSpec; this.requirementIdRegex = requirementIdSearchPattern; this.typeOfChangeRegex = typeOfChangeRegex; this.acceptedTypesOfChangeRegex = acceptedTypesOfChangeRegex; this.clDescriptionsOverridePath = CLDescriptionsInputPath; this.requirementsInputFile = requirementsInputFile; this.requirementsInputRegex = requirementsInputRegex; this.requirementsFilterFile = requirementsFilterFile; this.matrixOutputFile = matrixOutputFile; } public void loadFromXML(File input) throws IOException { new XMLIO().readXML(input); } public void storeToXML(File output) throws IOException { new XMLIO().writeXML(output); } public String getServer() { return server; } public String getUser() { return user; } public String getPassword() { return password; } public void setPassword(String password) { this.password = password; } public String getFileSpec() { return fileSpec; } public String getRequirementIdRegex() { return requirementIdRegex; } public String getTypeOfChangeRegex() { return typeOfChangeRegex; } public String getAcceptedTypesOfChangeRegex() { return acceptedTypesOfChangeRegex; } public String getCLDescriptionsOverridePath() { return clDescriptionsOverridePath; } public String getRequirementsInputFile() { return requirementsInputFile; } public String getRequirementsInputRegex() { return requirementsInputRegex; } public String getRequirementsFilterFile() { return requirementsFilterFile; } public String getMatrixOutputFile() { return matrixOutputFile; } private interface Archive { String fill(String key, String value); } private class XMLIO extends Properties { private void serialize(Archive archive) { server = archive.fill("server", server); user = archive.fill("user", user); fileSpec = archive.fill("file spec", fileSpec); requirementIdRegex = archive.fill("requirement id regex", requirementIdRegex); 
typeOfChangeRegex = archive.fill("type of change regex", typeOfChangeRegex); acceptedTypesOfChangeRegex = archive.fill("accepted types of change regex", acceptedTypesOfChangeRegex); clDescriptionsOverridePath = archive.fill("cl descriptions override path", clDescriptionsOverridePath); requirementsInputFile = archive.fill("requirements input file", requirementsInputFile); requirementsInputRegex = archive.fill("requirements input regex", requirementsInputRegex); requirementsFilterFile = archive.fill("requirements filter file", requirementsFilterFile); matrixOutputFile = archive.fill("output file", matrixOutputFile); } public void readXML(File file) throws IOException { try (InputStream inputStream = new FileInputStream(file)) { loadFromXML(inputStream); serialize(new InputArchive()); } } public void writeXML(File file) throws IOException { try (OutputStream outputStream = new FileOutputStream(file)) { serialize(new OutputArchive()); storeToXML(outputStream, null); } } private class InputArchive implements Archive { @Override public String fill(String key, String value) { return getProperty(key); } } private class OutputArchive implements Archive { @Override public String fill(String key, String value) { if (key == null || value == null) { return null; } setProperty(key, value); return value; } } } }<|fim▁end|>
this.matrixOutputFile = "TM.xls"; }
<|file_name|>reference.py<|end_file_name|><|fim▁begin|>from collections import namedtuple from uuid import uuid4 from GEMEditor.model.classes.annotation import Annotation class Reference: """ ReferenceItem contains the information a pubmed or similar literature reference Authors are saved as author instances """ def __init__(self, id=None, pmid="", pmc="", doi="", url="", authors=None, year="", title="", journal="", abstract=""): super(Reference, self).__init__() self._linked_items = set() self.id = id or str(uuid4()) self.pmid = pmid self.pmc = pmc self.doi = doi self.url = url if authors is None: self.authors = [] else: self.authors = authors self.year = year self.title = title self.journal = journal self.abstract = abstract @property def linked_items(self):<|fim▁hole|> @property def annotation(self): result = set() if self.pmid: result.add(Annotation("pubmed", self.pmid)) if self.pmc: result.add(Annotation("pmc", self.pmc)) if self.doi: result.add(Annotation("doi", self.doi)) return result def add_link(self, item, reciprocal=True): """ Remove reference link from item All items that inherit from this class should be able to link to each other. 
Parameters ---------- reference: GEMEditor.model.classes.base.ReferenceLink reciprocal: bool """ self._linked_items.add(item) if reciprocal: item.add_reference(self, reciprocal=False) def remove_link(self, item, reciprocal=True): """ Remove reference link from item Parameters ---------- item: GEMEditor.model.classes.base.ReferenceLink reciprocal: bool """ self._linked_items.discard(item) if reciprocal: item.remove_reference(self, reciprocal=False) def remove_all_links(self): """ Remove all reference links """ for item in self.linked_items: self.remove_link(item, reciprocal=True) def reference_string(self): """ Get the authors part of the usual citation of scientific literature i.e.: Lastname F et al., YYYY if there are more than 2 authors Lastname1 F1 and Lastname2 F2, YYYY if there are 2 authors Lastname F, YYYY if there is only one author Input tuple with (lastname, firstname, initials) """ # If there are more than 2 authors return a string if len(self.authors) > 2: return "{0} et al., {1}".format(self.authors[0].display_str, self.year) elif len(self.authors) == 2: return "{0} and {1}, {2}".format(self.authors[0].display_str, self.authors[1].display_str, self.year) elif self.authors: return "{0}, {1}".format(self.authors[0].display_str, self.year) else: return "" def __str__(self): id_strings = [] for attrib in ("pmid", "pmc", "doi"): if getattr(self, attrib): id_strings.append("{0}: {1}".format(attrib.upper(), getattr(self, attrib))) return "ID: {id}\n" \ "Authors: {authors}\n" \ "Title: {title}\n" \ "{id_strings}".format(id=self.id, authors=self.reference_string(), title=self.title, id_strings="; ".join(id_strings)) class Author(namedtuple("Author", ["lastname", "firstname", "initials"])): __slots__ = () def __new__(cls, lastname="", firstname="", initials=""): self = super(Author, cls).__new__(cls, lastname=lastname, firstname=firstname, initials=initials) return self @property def display_str(self): if self.initials: return "{0} {1}".format(self.lastname, 
self.initials) else: return self.lastname<|fim▁end|>
return self._linked_items.copy()
<|file_name|>hello.rs<|end_file_name|><|fim▁begin|>fn main() { println!("HW"); // macro!? println!("{} days", 31); println!("{0}, this is {1}. {1}, this is {0}", "Alice", "Bob"); println!("{subj} {verb} {pred}",<|fim▁hole|> pred="ow", subj="su", verb="jump"); }<|fim▁end|>
<|file_name|>handleit_form.js<|end_file_name|><|fim▁begin|>var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; } function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } var HandleitForm = function (_React$Component) { _inherits(HandleitForm, _React$Component); function HandleitForm(props) { _classCallCheck(this, HandleitForm); // this.hide_elements(); var _this = _possibleConstructorReturn(this, (HandleitForm.__proto__ || Object.getPrototypeOf(HandleitForm)).call(this, props)); _this.hide_elements = function () { $('#navID').css('display', 'none'); $('#userNav').css('display', 'none'); // $('#noUserNav').css('display', 'none') $('#imageBar').css('display', 'none'); $('#footerID').css('display', 'none'); // $('#noUserNav').css('display', 'none') }; _this.onClick_jspdf = function () { var doc = new jspdf.jsPDF({ format: "letter" }); var source = window.document.getElementsByTagName("body")[0]; doc.html(source, { callback: function callback(doc) { doc.output("dataurlnewwindow"); }, x: 15, y: 15 }); }; _this.onClick_print = function () { window.print(); }; return _this; } _createClass(HandleitForm, [{ key: 'roundCurrency', value: function roundCurrency(n) { var mult = 100, value = void 0; value = parseFloat((n * mult).toFixed(6)); return Math.round(value) / mult; } }, { key: 'render', value: function render() { var _this2 = this; var workitems = void 0, docApp = void 0; if (this.props.type == "project") { var proj = this.props.projectData; workitems = proj.workItems; docApp = proj.documentPackage.application; } else if (this.props.type == "assessment") { var assessment = this.props.assessmentData; workitems = assessment.workItems; docApp = assessment.documentPackage.application; } var name = docApp.name.middle && docApp.name.middle.length > 0 ? 
docApp.name.first + ' ' + docApp.name.middle + ' ' + docApp.name.last : docApp.name.first + ' ' + docApp.name.last; var address = docApp.address.line_1; if (docApp.address.line_2 && docApp.address.line_2.length > 0) { address += '| ' + docApp.address.line_2 + '\n'; } var total_volunteers = 0; return React.createElement( 'div', null, React.createElement( 'div', { id: 'buttons-container', className: 'no-print' }, React.createElement( 'button', { onClick: this.onClick_print }, 'Print' ) ), React.createElement( 'div', { id: 'cblock-container' }, React.createElement('img', { src: '/images/app_project/letterhead.png' }) ), React.createElement( 'p', { id: 'info-container' }, 'Catalyst Partnerships is a non-proft general contractor. We bring together useful resources and caring volunteers to meet the needs of under-resourced people in our community. \u201CHandle-It\u201D volunteers can provide minor home repairs to improve the safety of the home for no fee. Handle-It Volunteers are skilled handy men and women who have undergone and passed background checks and are insured by Catalyst. 
To the extent required by law, Catalyst is duly licensed, bonded, and insured to perform such work' ), React.createElement( 'h1', { id: 'doc-header' }, 'HANDLE-IT WORK AGREEMENT' ), React.createElement( 'table', null, React.createElement( 'tbody', null, React.createElement( 'tr', null, React.createElement( 'td', null, React.createElement( 'b', null, 'Property Owner:' ) ), React.createElement( 'td', null, name ) ), React.createElement( 'tr', null, React.createElement( 'td', null, React.createElement( 'b', null, 'Address:' ) ), React.createElement( 'td', null, React.createElement( 'div', null, address ), React.createElement( 'div', null, docApp.address.city,<|fim▁hole|> ' ', docApp.address.zip ) ) ), React.createElement( 'tr', null, React.createElement( 'td', null, React.createElement( 'b', null, 'Phone:' ) ), React.createElement( 'td', null, docApp.phone.preferred ) ), React.createElement( 'tr', null, React.createElement( 'td', null, React.createElement( 'b', null, 'Email:' ) ), React.createElement( 'td', null, docApp.email ) ) ) ), React.createElement( 'h2', null, 'Work Requested' ), workitems.map(function (workItem) { var workitemCost = 0; workItem.materialsItems.forEach(function (materialsItem) { workitemCost += _this2.roundCurrency(materialsItem.price * materialsItem.quantity); }); total_volunteers += workItem.volunteers_required; return React.createElement( 'div', { className: 'workitem-total-container', key: workItem._id }, React.createElement( 'div', { key: "wi-" + workItem._id, className: 'workitem-container' }, React.createElement( 'table', null, React.createElement( 'tbody', null, React.createElement( 'tr', null, React.createElement( 'th', null, 'Work Item Name' ), React.createElement( 'td', null, workItem.name ) ), React.createElement( 'tr', null, React.createElement( 'th', null, 'Description' ), React.createElement( 'td', null, workItem.description ) ), workItem.project_comments && workItem.project_comments.length > 0 ? 
React.createElement( 'tr', null, React.createElement( 'th', null, 'Project Comments' ), React.createElement( 'td', null, workItem.project_comments ) ) : null, React.createElement( 'tr', null, React.createElement( 'th', null, 'Cost' ), React.createElement( 'td', null, workitemCost.toFixed(2) ) ) ) ) ) ); }), React.createElement( 'p', { id: 'price-p' }, 'Price: Catalyst Partnerships shall provide resources for the work. The cost of this project to the property owner is $0.' ), React.createElement( 'p', null, 'Scope: The scope of Handle-It Projects are jobs that will require 1-3 volunteers one day\u2019s time and cost Catalyst $500 or less. In some cases, the property owner may already own the item that needs installation. If, after the Handle-It volunteer examines the scope of work, it is decided that the job would require more extensive labor and/or materials, this project may be recommended for consideration as a full Catalyst Project. This will require further fnancial vetting and estimation of the necessary work to restore the home to safety' ), React.createElement( 'p', null, 'Volunteer Labor: Catalyst Partnerships is responsible for providing volunteer labor required to complete this project. Catalyst Partnerships is also responsible for providing materials, tools, and all other resources required to complete this project. Due to the nature of this non-proft, volunteer activity, property owner understands that the quality of service and/or craftsmanship received may not refect professional standards.' ), React.createElement( 'p', { id: 'acceptance-p' }, 'Acceptance of Contract: The above price, specifcations and conditions are satisfactory and are hereby accepted. Catalyst Partnerships is authorized to furnish all materials and volunteer labor required to complete the project as stated.' 
), React.createElement( 'div', { className: 'signatures-container' }, React.createElement( 'div', null, 'Date __________________' ), React.createElement( 'div', null, React.createElement( 'div', null, 'X_______________________________________________' ), React.createElement( 'div', null, 'Property Owner' ) ) ), React.createElement( 'div', { className: 'signatures-container' }, React.createElement( 'div', { className: '' }, 'Date __________________' ), React.createElement( 'div', { className: '' }, React.createElement( 'div', null, 'X_______________________________________________' ), React.createElement( 'div', null, 'Catalyst Handle-It Volunteer' ) ) ), React.createElement( 'p', null, 'Please sign two copies \u2013 one for the homeowner, the other for the Catalyst offce' ) ); } }]); return HandleitForm; }(React.Component); function loadReact() { console.log(type, assessment_id); if (type == "project") { $.ajax({ url: "/app_project/projects/" + project_id, type: "GET", success: function success(data) { console.log(data); ReactDOM.render(React.createElement(HandleitForm, { type: type, projectData: data }), document.getElementById("pdf_container")); } }); } else if (type == "assessment") { $.ajax({ url: "/app_project/site_assessments/" + assessment_id, type: "GET", success: function success(data) { console.log(data); ReactDOM.render(React.createElement(HandleitForm, { type: type, assessmentData: data }), document.getElementById("pdf_container")); } }); } } loadReact();<|fim▁end|>
', ', docApp.address.state,
<|file_name|>htmlparagraphelement.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use dom::bindings::utils::{DOMString, null_string, ErrorResult}; use dom::htmlelement::HTMLElement; <|fim▁hole|> parent: HTMLElement } impl HTMLParagraphElement { pub fn Align(&self) -> DOMString { null_string } pub fn SetAlign(&mut self, _align: &DOMString, _rv: &mut ErrorResult) { } }<|fim▁end|>
pub struct HTMLParagraphElement {
<|file_name|>Files_assertHasDigest_DigestBytes_Test.java<|end_file_name|><|fim▁begin|>/* * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * Copyright 2012-2020 the original author or authors. */ package org.assertj.core.internal.files; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatExceptionOfType; import static org.assertj.core.api.Assertions.assertThatNullPointerException; import static org.assertj.core.api.Assertions.catchThrowable; import static org.assertj.core.error.ShouldBeFile.shouldBeFile; import static org.assertj.core.error.ShouldBeReadable.shouldBeReadable; import static org.assertj.core.error.ShouldExist.shouldExist; import static org.assertj.core.error.ShouldHaveDigest.shouldHaveDigest; import static org.assertj.core.util.FailureMessages.actualIsNull; import static org.mockito.ArgumentMatchers.any; import static org.mockito.BDDMockito.given; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.UncheckedIOException; import java.security.MessageDigest; import org.assertj.core.api.AssertionInfo; import org.assertj.core.internal.DigestDiff; import org.assertj.core.internal.Files; import org.assertj.core.internal.FilesBaseTest; import org.junit.jupiter.api.Test; /** * Tests for <code>{@link Files#assertHasDigest(AssertionInfo, File, MessageDigest, byte[])}</code> * * @author Valeriy Vyrva */ 
class Files_assertHasDigest_DigestBytes_Test extends FilesBaseTest { private final MessageDigest digest = mock(MessageDigest.class); private final byte[] expected = new byte[0]; @Test void should_fail_if_actual_is_null() { assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> files.assertHasDigest(INFO, null, digest, expected)) .withMessage(actualIsNull()); } @Test void should_fail_with_should_exist_error_if_actual_does_not_exist() { // GIVEN given(actual.exists()).willReturn(false); // WHEN catchThrowable(() -> files.assertHasDigest(INFO, actual, digest, expected)); // THEN verify(failures).failure(INFO, shouldExist(actual)); } @Test void should_fail_if_actual_exists_but_is_not_file() { // GIVEN given(actual.exists()).willReturn(true); given(actual.isFile()).willReturn(false); // WHEN catchThrowable(() -> files.assertHasDigest(INFO, actual, digest, expected)); // THEN verify(failures).failure(INFO, shouldBeFile(actual)); } @Test void should_fail_if_actual_exists_but_is_not_readable() { // GIVEN given(actual.exists()).willReturn(true); given(actual.isFile()).willReturn(true); given(actual.canRead()).willReturn(false); // WHEN catchThrowable(() -> files.assertHasDigest(INFO, actual, digest, expected)); // THEN verify(failures).failure(INFO, shouldBeReadable(actual)); } @Test void should_throw_error_if_digest_is_null() { assertThatNullPointerException().isThrownBy(() -> files.assertHasDigest(INFO, null, (MessageDigest) null, expected)) .withMessage("The message digest algorithm should not be null"); } @Test void should_throw_error_if_expected_is_null() { assertThatNullPointerException().isThrownBy(() -> files.assertHasDigest(INFO, null, digest, (byte[]) null)) .withMessage("The binary representation of digest to compare to should not be null"); } @Test void should_throw_error_wrapping_caught_IOException() throws IOException { // GIVEN IOException cause = new IOException(); given(actual.exists()).willReturn(true); given(actual.isFile()).willReturn(true); 
given(actual.canRead()).willReturn(true); given(nioFilesWrapper.newInputStream(any())).willThrow(cause); // WHEN Throwable error = catchThrowable(() -> files.assertHasDigest(INFO, actual, digest, expected)); // THEN assertThat(error).isInstanceOf(UncheckedIOException.class) .hasCause(cause); } @Test<|fim▁hole|> Throwable error = catchThrowable(() -> files.assertHasDigest(INFO, actual, unknownDigestAlgorithm, expected)); // THEN assertThat(error).isInstanceOf(IllegalStateException.class) .hasMessage("Unable to find digest implementation for: <UnknownDigestAlgorithm>"); } @Test void should_fail_if_actual_does_not_have_expected_digest() throws IOException { // GIVEN InputStream stream = getClass().getResourceAsStream("/red.png"); given(actual.exists()).willReturn(true); given(actual.isFile()).willReturn(true); given(actual.canRead()).willReturn(true); given(nioFilesWrapper.newInputStream(any())).willReturn(stream); given(digest.digest()).willReturn(new byte[] { 0, 1 }); // WHEN catchThrowable(() -> files.assertHasDigest(INFO, actual, digest, expected)); // THEN verify(failures).failure(INFO, shouldHaveDigest(actual, new DigestDiff("0001", "", digest))); failIfStreamIsOpen(stream); } @Test void should_pass_if_actual_has_expected_digest() throws IOException { // GIVEN InputStream stream = getClass().getResourceAsStream("/red.png"); given(actual.exists()).willReturn(true); given(actual.isFile()).willReturn(true); given(actual.canRead()).willReturn(true); given(nioFilesWrapper.newInputStream(any())).willReturn(stream); given(digest.digest()).willReturn(expected); // WHEN files.assertHasDigest(INFO, actual, digest, expected); // THEN failIfStreamIsOpen(stream); } }<|fim▁end|>
void should_throw_error_wrapping_caught_NoSuchAlgorithmException() { // GIVEN String unknownDigestAlgorithm = "UnknownDigestAlgorithm"; // WHEN
#! /usr/bin/env python3
"""Move-to-front transform: encode a string to indices and back again."""
from __future__ import print_function
from string import ascii_lowercase

# Default symbol table: the lowercase ASCII alphabet.
SYMBOLTABLE = list(ascii_lowercase)


def move2front_encode(strng, symboltable):
    """Encode `strng` as a list of indices via the move-to-front transform.

    Each character is replaced by its current position in a working copy of
    `symboltable`; that symbol is then moved to the front, so recently used
    characters encode to small indices. `symboltable` itself is not mutated.
    """
    sequence, pad = [], symboltable[::]
    for char in strng:
        indx = pad.index(char)
        sequence.append(indx)
        # Move the symbol to the front in place instead of rebuilding the
        # whole table (`[pad.pop(indx)] + pad` allocated a fresh list for
        # every input character).
        pad.insert(0, pad.pop(indx))
    return sequence


def move2front_decode(sequence, symboltable):
    """Invert move2front_encode: map an index sequence back to a string.

    Replays the same move-to-front bookkeeping on a copy of `symboltable`
    so each index resolves to the character the encoder saw.
    """
    chars, pad = [], symboltable[::]
    for indx in sequence:
        char = pad[indx]
        chars.append(char)
        pad.insert(0, pad.pop(indx))
    return ''.join(chars)


if __name__ == '__main__':
    # Round-trip demo over a few repetitive strings.
    for s in ['broood', 'bananaaa', 'hiphophiphop']:
        encode = move2front_encode(s, SYMBOLTABLE)
        print('%14r encodes to %r' % (s, encode), end=', ')
        decode = move2front_decode(encode, SYMBOLTABLE)
        print('which decodes back to %r' % decode)
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from oslo.config import cfg
import webob

from nova.api.openstack.compute import image_metadata
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes

CONF = cfg.CONF


class ImageMetaDataTest(test.TestCase):
    """Exercises the image-metadata API controller against stubbed glance."""

    def setUp(self):
        super(ImageMetaDataTest, self).setUp()
        fakes.stub_out_glance(self.stubs)
        self.controller = image_metadata.Controller()

    def test_index(self):
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
        res_dict = self.controller.index(req, '123')
        expected = {'metadata': {'key1': 'value1'}}
        self.assertEqual(res_dict, expected)

    def test_show(self):
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
        res_dict = self.controller.show(req, '123', 'key1')
        self.assertIn('meta', res_dict)
        self.assertEqual(len(res_dict['meta']), 1)
        self.assertEqual('value1', res_dict['meta']['key1'])

    def test_show_not_found(self):
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key9')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show, req, '123', 'key9')

    def test_show_image_not_found(self):
        req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show, req, '100', 'key9')

    def test_create(self):
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
        req.method = 'POST'
        body = {"metadata": {"key7": "value7"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        res = self.controller.create(req, '123', body)
        expected_output = {'metadata': {'key1': 'value1', 'key7': 'value7'}}
        self.assertEqual(expected_output, res)

    def test_create_image_not_found(self):
        req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata')
        req.method = 'POST'
        body = {"metadata": {"key7": "value7"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.create, req, '100', body)

    def test_update_all(self):
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
        req.method = 'PUT'
        body = {"metadata": {"key9": "value9"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        res = self.controller.update_all(req, '123', body)
        expected_output = {'metadata': {'key9': 'value9'}}
        self.assertEqual(expected_output, res)

    def test_update_all_image_not_found(self):
        req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata')
        req.method = 'PUT'
        body = {"metadata": {"key9": "value9"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.update_all, req, '100', body)

    def test_update_item(self):
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
        req.method = 'PUT'
        body = {"meta": {"key1": "zz"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        res = self.controller.update(req, '123', 'key1', body)
        expected_output = {'meta': {'key1': 'zz'}}
        self.assertEqual(res, expected_output)

    def test_update_item_image_not_found(self):
        req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
        req.method = 'PUT'
        body = {"meta": {"key1": "zz"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.update, req, '100', 'key1', body)

    def test_update_item_bad_body(self):
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
        req.method = 'PUT'
        body = {"key1": "zz"}
        req.body = ''
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update, req, '123', 'key1', body)

    def test_update_item_too_many_keys(self):
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
        req.method = 'PUT'
        overload = {}
        for num in range(CONF.quota_metadata_items + 1):
            overload['key%s' % num] = 'value%s' % num
        body = {'meta': overload}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update, req, '123', 'key1', body)

    def test_update_item_body_uri_mismatch(self):
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/bad')
        req.method = 'PUT'
        body = {"meta": {"key1": "value1"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update, req, '123', 'bad', body)

    def test_delete(self):
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
        req.method = 'DELETE'
        res = self.controller.delete(req, '123', 'key1')
        self.assertIsNone(res)

    def test_delete_not_found(self):
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah')
        req.method = 'DELETE'
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.delete, req, '123', 'blah')

    def test_delete_image_not_found(self):
        req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
        req.method = 'DELETE'
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.delete, req, '100', 'key1')

    def test_too_many_metadata_items_on_create(self):
        data = {"metadata": {}}
        for num in range(CONF.quota_metadata_items + 1):
            data['metadata']['key%i' % num] = "blah"
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
        req.method = 'POST'
        req.body = jsonutils.dumps(data)
        req.headers["content-type"] = "application/json"
        # NOTE(review): the original asserts this twice in a row — looks
        # redundant but is preserved to keep behavior identical; confirm
        # whether the repeat was intentional (e.g. idempotence check).
        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                          self.controller.create, req, '123', data)
        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                          self.controller.create, req, '123', data)

    def test_too_many_metadata_items_on_put(self):
        self.flags(quota_metadata_items=1)
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah')
        req.method = 'PUT'
        body = {"meta": {"blah": "blah"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                          self.controller.update, req, '123', 'blah', body)

    def test_image_not_authorized_update(self):
        image_id = 131
        # see nova.tests.api.openstack.fakes:_make_image_fixtures
        req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1'
                                      % image_id)
        req.method = 'PUT'
        body = {"meta": {"key1": "value1"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.controller.update, req, image_id, 'key1', body)

    def test_image_not_authorized_update_all(self):
        image_id = 131
        # see nova.tests.api.openstack.fakes:_make_image_fixtures
        req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1'
                                      % image_id)
        req.method = 'PUT'
        body = {"meta": {"key1": "value1"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.controller.update_all, req, image_id, body)

    def test_image_not_authorized_create(self):
        image_id = 131
        # see nova.tests.api.openstack.fakes:_make_image_fixtures
        req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1'
                                      % image_id)
        req.method = 'POST'
        body = {"meta": {"key1": "value1"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.controller.create, req, image_id, body)
<|file_name|>tls_extension.go<|end_file_name|><|fim▁begin|>package handshake import ( "bytes" "encoding/binary" "errors" "fmt" "github.com/bifurcation/mint" "github.com/lucas-clemente/quic-go/internal/protocol" "github.com/lucas-clemente/quic-go/internal/utils" ) type transportParameterID uint16 const quicTLSExtensionType = 0xff5 const ( initialMaxStreamDataParameterID transportParameterID = 0x0 initialMaxDataParameterID transportParameterID = 0x1 initialMaxBidiStreamsParameterID transportParameterID = 0x2 idleTimeoutParameterID transportParameterID = 0x3 maxPacketSizeParameterID transportParameterID = 0x5 statelessResetTokenParameterID transportParameterID = 0x6 initialMaxUniStreamsParameterID transportParameterID = 0x8 disableMigrationParameterID transportParameterID = 0x9 ) type clientHelloTransportParameters struct { InitialVersion protocol.VersionNumber Parameters TransportParameters } func (p *clientHelloTransportParameters) Marshal() []byte { const lenOffset = 4 b := &bytes.Buffer{} utils.BigEndian.WriteUint32(b, uint32(p.InitialVersion)) b.Write([]byte{0, 0}) // length. 
Will be replaced later p.Parameters.marshal(b) data := b.Bytes() binary.BigEndian.PutUint16(data[lenOffset:lenOffset+2], uint16(len(data)-lenOffset-2)) return data } func (p *clientHelloTransportParameters) Unmarshal(data []byte) error { if len(data) < 6 { return errors.New("transport parameter data too short") } p.InitialVersion = protocol.VersionNumber(binary.BigEndian.Uint32(data[:4]))<|fim▁hole|> } return p.Parameters.unmarshal(data) } type encryptedExtensionsTransportParameters struct { NegotiatedVersion protocol.VersionNumber SupportedVersions []protocol.VersionNumber Parameters TransportParameters } func (p *encryptedExtensionsTransportParameters) Marshal() []byte { b := &bytes.Buffer{} utils.BigEndian.WriteUint32(b, uint32(p.NegotiatedVersion)) b.WriteByte(uint8(4 * len(p.SupportedVersions))) for _, v := range p.SupportedVersions { utils.BigEndian.WriteUint32(b, uint32(v)) } lenOffset := b.Len() b.Write([]byte{0, 0}) // length. Will be replaced later p.Parameters.marshal(b) data := b.Bytes() binary.BigEndian.PutUint16(data[lenOffset:lenOffset+2], uint16(len(data)-lenOffset-2)) return data } func (p *encryptedExtensionsTransportParameters) Unmarshal(data []byte) error { if len(data) < 5 { return errors.New("transport parameter data too short") } p.NegotiatedVersion = protocol.VersionNumber(binary.BigEndian.Uint32(data[:4])) numVersions := int(data[4]) if numVersions%4 != 0 { return fmt.Errorf("invalid length for version list: %d", numVersions) } numVersions /= 4 data = data[5:] if len(data) < 4*numVersions+2 /*length field for the parameter list */ { return errors.New("transport parameter data too short") } p.SupportedVersions = make([]protocol.VersionNumber, numVersions) for i := 0; i < numVersions; i++ { p.SupportedVersions[i] = protocol.VersionNumber(binary.BigEndian.Uint32(data[:4])) data = data[4:] } paramsLen := int(binary.BigEndian.Uint16(data[:2])) data = data[2:] if len(data) != paramsLen { return fmt.Errorf("expected transport parameters to be %d 
bytes long, have %d", paramsLen, len(data)) } return p.Parameters.unmarshal(data) } type tlsExtensionBody struct { data []byte } var _ mint.ExtensionBody = &tlsExtensionBody{} func (e *tlsExtensionBody) Type() mint.ExtensionType { return quicTLSExtensionType } func (e *tlsExtensionBody) Marshal() ([]byte, error) { return e.data, nil } func (e *tlsExtensionBody) Unmarshal(data []byte) (int, error) { e.data = data return len(data), nil }<|fim▁end|>
paramsLen := int(binary.BigEndian.Uint16(data[4:6])) data = data[6:] if len(data) != paramsLen { return fmt.Errorf("expected transport parameters to be %d bytes long, have %d", paramsLen, len(data))
<|file_name|>conf.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # flake8: noqa # Disable Flake8 because of all the sphinx imports # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,<|fim▁hole|># "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # Airflow documentation build configuration file, created by # sphinx-quickstart on Thu Oct 9 20:50:01 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. 
"""Configuration of Airflow Docs""" import os import sys from typing import Dict import airflow from airflow.configuration import default_config_yaml try: import sphinx_airflow_theme # pylint: disable=unused-import airflow_theme_is_available = True except ImportError: airflow_theme_is_available = False autodoc_mock_imports = [ 'MySQLdb', 'adal', 'analytics', 'azure', 'azure.cosmos', 'azure.datalake', 'azure.mgmt', 'boto3', 'botocore', 'bson', 'cassandra', 'celery', 'cloudant', 'cryptography', 'cx_Oracle', 'datadog', 'distributed', 'docker', 'google', 'google_auth_httplib2', 'googleapiclient', 'grpc', 'hdfs', 'httplib2', 'jaydebeapi', 'jenkins', 'jira', 'kubernetes', 'mesos', 'msrestazure', 'pandas', 'pandas_gbq', 'paramiko', 'pinotdb', 'psycopg2', 'pydruid', 'pyhive', 'pyhive', 'pymongo', 'pymssql', 'pysftp', 'qds_sdk', 'redis', 'simple_salesforce', 'slackclient', 'smbclient', 'snowflake', 'sshtunnel', 'tenacity', 'vertica_python', 'winrm', 'zdesk', ] # Hack to allow changing for piece of the code to behave differently while # the docs are being built. The main objective was to alter the # behavior of the utils.apply_default that was hiding function headers os.environ['BUILDING_AIRFLOW_DOCS'] = 'TRUE' # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.join(os.path.dirname(__file__), 'exts')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinx.ext.graphviz', 'sphinxarg.ext', 'sphinxcontrib.httpdomain', 'sphinxcontrib.jinja', 'sphinx.ext.intersphinx', 'autoapi.extension', 'exampleinclude', 'docroles', 'removemarktransform', ] autodoc_default_options = { 'show-inheritance': True, 'members': True } jinja_contexts = { 'config_ctx': {"configs": default_config_yaml()} } viewcode_follow_imported_members = True # Add any paths that contain templates here, relative to this directory. templates_path = ['templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Airflow' # copyright = u'' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. # version = '1.0.0' version = airflow.__version__ # The full version, including alpha/beta/rc tags. # release = '1.0.0' release = airflow.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. 
exclude_patterns = [ '_api/airflow/_vendor', '_api/airflow/api', '_api/airflow/bin', '_api/airflow/config_templates', '_api/airflow/configuration', '_api/airflow/contrib/auth', '_api/airflow/contrib/example_dags', '_api/airflow/contrib/index.rst', '_api/airflow/contrib/kubernetes', '_api/airflow/contrib/task_runner', '_api/airflow/contrib/utils', '_api/airflow/dag', '_api/airflow/default_login', '_api/airflow/example_dags', '_api/airflow/exceptions', '_api/airflow/index.rst', '_api/airflow/jobs', '_api/airflow/lineage', '_api/airflow/logging_config', '_api/airflow/macros', '_api/airflow/migrations', '_api/airflow/plugins_manager', '_api/airflow/security', '_api/airflow/serialization', '_api/airflow/settings', '_api/airflow/sentry', '_api/airflow/stats', '_api/airflow/task', '_api/airflow/ti_deps', '_api/airflow/utils', '_api/airflow/version', '_api/airflow/www', '_api/airflow/www_rbac', '_api/main', 'autoapi_templates', 'howto/operator/gcp/_partials', ] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. 
keep_warnings = True intersphinx_mapping = { 'boto3': ('https://boto3.amazonaws.com/v1/documentation/api/latest/', None), 'mongodb': ('https://api.mongodb.com/python/current/', None), 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None), 'python': ('https://docs.python.org/3/', None), 'requests': ('https://requests.readthedocs.io/en/master/', None), 'sqlalchemy': ('https://docs.sqlalchemy.org/en/latest/', None), 'hdfs': ('https://hdfscli.readthedocs.io/en/latest/', None), # google-cloud-python 'google-cloud-automl': ('https://googleapis.dev/python/automl/latest', None), 'google-cloud-bigquery': ('https://googleapis.dev/python/bigquery/latest', None), 'google-cloud-bigquery-datatransfer': ('https://googleapis.dev/python/bigquerydatatransfer/latest', None), 'google-cloud-bigquery-storage': ('https://googleapis.dev/python/bigquerystorage/latest', None), 'google-cloud-bigtable': ('https://googleapis.dev/python/bigtable/latest', None), 'google-cloud-container': ('https://googleapis.dev/python/container/latest', None), 'google-cloud-core': ('https://googleapis.dev/python/google-cloud-core/latest', None), 'google-cloud-datastore': ('https://googleapis.dev/python/datastore/latest', None), 'google-cloud-dlp': ('https://googleapis.dev/python/dlp/latest', None), 'google-cloud-kms': ('https://googleapis.dev/python/cloudkms/latest', None), 'google-cloud-language': ('https://googleapis.dev/python/language/latest', None), 'google-cloud-pubsub': ('https://googleapis.dev/python/pubsub/latest', None), 'google-cloud-redis': ('https://googleapis.dev/python/redis/latest', None), 'google-cloud-spanner': ('https://googleapis.dev/python/spanner/latest', None), 'google-cloud-speech': ('https://googleapis.dev/python/speech/latest', None), 'google-cloud-storage': ('https://googleapis.dev/python/storage/latest', None), 'google-cloud-tasks': ('https://googleapis.dev/python/cloudtasks/latest', None), 'google-cloud-texttospeech': ('https://googleapis.dev/python/texttospeech/latest', 
None), 'google-cloud-translate': ('https://googleapis.dev/python/translation/latest', None), 'google-cloud-videointelligence': ('https://googleapis.dev/python/videointelligence/latest', None), 'google-cloud-vision': ('https://googleapis.dev/python/vision/latest', None), } # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' if airflow_theme_is_available: html_theme = 'sphinx_airflow_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] import sphinx_rtd_theme # pylint: disable=wrong-import-position,wrong-import-order html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". html_title = "Airflow Documentation" # A shorter title for the navigation bar. Default is the same as html_title. html_short_title = "" # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None html_favicon = "../airflow/www/static/pin_32.png" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # A list of JavaScript filename. 
The entry must be a filename string or a # tuple containing the filename string and the attributes dictionary. The # filename must be relative to the html_static_path, or a full URI with # scheme like http://example.org/script.js. html_js_files = ['jira-links.js'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. if airflow_theme_is_available: html_sidebars = { '**': [ 'version-selector.html', 'searchbox.html', 'globaltoc.html', ] } # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. html_show_copyright = False # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Airflowdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). 
# 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # type: Dict[str,str] # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'Airflow.tex', u'Airflow Documentation', u'Apache Airflow', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'airflow', u'Airflow Documentation', [u'Apache Airflow'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [( 'index', 'Airflow', u'Airflow Documentation', u'Apache Airflow', 'Airflow', 'Airflow is a system to programmatically author, schedule and monitor data pipelines.', 'Miscellaneous' ), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. 
# texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # sphinx-autoapi configuration # See: # https://sphinx-autoapi.readthedocs.io/en/latest/config.html # Paths (relative or absolute) to the source code that you wish to generate # your API documentation from. autoapi_dirs = [ os.path.abspath('../airflow'), ] # A directory that has user-defined templates to override our default templates. autoapi_template_dir = 'autoapi_templates' # A list of patterns to ignore when finding files autoapi_ignore = [ # These modules are backcompat shims, don't build docs for them '*/airflow/contrib/operators/s3_to_gcs_transfer_operator.py', '*/airflow/contrib/operators/gcs_to_gcs_transfer_operator.py', '*/airflow/contrib/operators/gcs_to_gcs_transfer_operator.py', '*/node_modules/*', '*/migrations/*', ] # Keep the AutoAPI generated files on the filesystem after the run. # Useful for debugging. autoapi_keep_files = True # Relative path to output the AutoAPI files into. This can also be used to place the generated documentation # anywhere in your documentation hierarchy. autoapi_root = '_api' # -- Options for examole include ------------------------------------------ exampleinclude_sourceroot = os.path.abspath('..') # -- Additional HTML Context variable html_context = { # Google Analytics ID. 
# For more information look at: # https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/layout.html#L222-L232 'theme_analytics_id': 'UA-140539454-1', } if airflow_theme_is_available: html_context = { # Variables used to build a button for editing the source code # # The path is created according to the following template: # # https://{{ github_host|default("github.com") }}/{{ github_user }}/{{ github_repo }}/ # {{ theme_vcs_pageview_mode|default("blob") }}/{{ github_version }}{{ conf_py_path }} # {{ pagename }}{{ suffix }} # # More information: # https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/templates/doc_builder/conf.py.tmpl#L100-L103 # https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/breadcrumbs.html#L45 # https://github.com/apache/airflow-site/blob/91f760c/sphinx_airflow_theme/sphinx_airflow_theme/suggest_change_button.html#L36-L40 # 'theme_vcs_pageview_mode': 'edit', 'conf_py_path': '/docs/', 'github_user': 'apache', 'github_repo': 'airflow', 'github_version': 'master', 'display_github': 'master', 'suffix': '.rst', }<|fim▁end|>
# software distributed under the License is distributed on an
import unittest


class StringProcessingTestBase(unittest.TestCase):
    """
    Shared fixtures and assertion helpers for StringProcessing tests.

    Subclasses inherit a collection of canonical test strings/patterns and
    two helpers that invoke a function under test for many argument tuples,
    comparing each result against its expected value with a descriptive
    failure message.
    """

    # The backslash character. Needed since there are limitations when
    # using backslashes at the end of raw-strings in front of the
    # terminating " or '.
    bs = "\\"

    # Basic test strings all StringProcessing functions should test.
    test_strings = [
        r"out1 'escaped-escape: \\ ' out2",
        r"out1 'escaped-quote: \' ' out2",
        r"out1 'escaped-anything: \X ' out2",
        r"out1 'two escaped escapes: \\\\ ' out2",
        r"out1 'escaped-quote at end: \'' out2",
        r"out1 'escaped-escape at end: \\' out2",
        r"out1 'str1' out2 'str2' out2",
        r"out1 \' 'str1' out2 'str2' out2",
        r"out1 \\\' 'str1' out2 'str2' out2",
        r"out1 \\ 'str1' out2 'str2' out2",
        r"out1 \\\\ 'str1' out2 'str2' out2",
        r"out1 \\'str1' out2 'str2' out2",
        r"out1 \\\\'str1' out2 'str2' out2",
        r"out1 'str1''str2''str3' out2",
        r"",
        r"out1 out2 out3",
        bs,
        2 * bs]

    # Test string for multi-pattern tests (since we want to variate the
    # pattern, not the test string).
    multi_pattern_test_string = (r"abcabccba###\\13q4ujsabbc\+'**'ac"
                                 r"###.#.####-ba")

    # Multiple patterns for the multi-pattern tests.
    multi_patterns = [r"abc",
                      r"ab",
                      r"ab|ac",
                      2 * bs,
                      r"#+",
                      r"(a)|(b)|(#.)",
                      r"(?:a(b)*c)+",
                      r"1|\+"]

    # Test strings for the remove_empty_matches feature (alias auto-trim).
    auto_trim_test_pattern = r";"
    auto_trim_test_strings = [r";;;;;;;;;;;;;;;;",
                              r"\\;\\\\\;\\#;\\\';;\;\\\\;+ios;;",
                              r"1;2;3;4;5;6;",
                              r"1;2;3;4;5;6;7",
                              r"",
                              r"Hello world",
                              r"\;",
                              r"\\;",
                              r"abc;a;;;;;asc"]

    # Test strings for search-in-between functions.
    search_in_between_begin_pattern = r"("
    search_in_between_end_pattern = r")"
    search_in_between_test_strings = [
        r"()assk(This is a word)and((in a word) another ) one anyway.",
        r"bcc5(((((((((((((((((((1)2)3)))))))))))))))))",
        r"Let's (do (it ) more ) complicated ) ) ) () (hello.)",
        r"()assk\\(This\ is a word\)and((in a\\\ word\\\\\) another \)) "
        r"one anyway.",
        r"bcc5\(\(\((((((\\\(((((((((((1)2)3))\\\\\)))))))))))))\)\)",
        r"Let's \(do (it ) more ) \\ complicated ) ) ) () (hello.)\\z"]

    @staticmethod
    def _construct_message(func, args, kwargs):
        """
        Constructs the error message for the call result assertions.

        :param func:   The function that was called.
        :param args:   The argument tuple the function was invoked with.
        :param kwargs: The named arguments dict the function was invoked
                       with.
        :return:       The error message.
        """
        # repr() the positional arguments and render keyword arguments as
        # "key=repr(value)" so the message mirrors the original call site.
        args = [repr(x) for x in args]
        kwargs = [str(key) + '=' + repr(value)
                  for key, value in kwargs.items()]

        return "Called {}({}).".format(func.__name__,
                                       ", ".join(args + kwargs))

    def assertResultsEqual(self,
                           func,
                           invocation_and_results,
                           postprocess=lambda result: result):
        """
        Tests each given invocation against the given results with the
        specified function.

        :param func:                   The function to test.
        :param invocation_and_results: A dict containing the invocation
                                       tuple as key and the result as
                                       value.
        :param postprocess:            A function that shall process the
                                       returned result from the tested
                                       function. The function must accept
                                       only one parameter as
                                       postprocessing input. Performs no
                                       postprocessing by default.
        """
        for args, result in invocation_and_results.items():
            self.assertEqual(
                postprocess(func(*args)),
                result,
                self._construct_message(func, args, {}))

    def assertResultsEqualEx(self,
                             func,
                             invocation_and_results,
                             postprocess=lambda result: result):
        """
        Tests each given invocation against the given results with the
        specified function. This is an extended version of
        ``assertResultsEqual()`` that supports also ``**kwargs``.

        :param func:                   The function to test.
        :param invocation_and_results: A dict containing the invocation
                                       tuple as key and the result as
                                       value. The tuple contains
                                       (args, kwargs). Note: the kwargs
                                       part must be hashable to serve as
                                       part of a dict key.
        :param postprocess:            A function that shall process the
                                       returned result from the tested
                                       function. The function must accept
                                       only one parameter as
                                       postprocessing input. Performs no
                                       postprocessing by default.
        """
        for (args, kwargs), result in invocation_and_results.items():
            self.assertEqual(
                postprocess(func(*args, **kwargs)),
                result,
                self._construct_message(func, args, kwargs))
"""Packaging script for the ``edt`` C++ extension (pbr-based metadata)."""
import setuptools
import sys
import numpy as np

# NOTE: If edt.cpp does not exist:
# cython -3 --fast-fail -v --cplus edt.pyx

# Pick compiler flags for the host toolchain: MSVC uses slash-style
# switches, everything else gets the GCC/Clang equivalents.
if sys.platform == 'win32':
    extra_compile_args = ['/std:c++11', '/O2']
else:
    extra_compile_args = ['-std=c++11', '-O3', '-ffast-math', '-pthread']

# macOS additionally needs libc++ and a minimum deployment target.
if sys.platform == 'darwin':
    extra_compile_args += ['-stdlib=libc++', '-mmacosx-version-min=10.9']

setuptools.setup(
    setup_requires=['pbr'],
    python_requires="~=3.6",  # >= 3.6 < 4.0
    ext_modules=[
        setuptools.Extension(
            'edt',
            sources=['edt.cpp'],
            language='c++',
            include_dirs=[np.get_include()],
            extra_compile_args=extra_compile_args,
        ),
    ],
    long_description_content_type='text/markdown',
    pbr=True,
)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import numpy.linalg


def norm_along_column(a, ord=2):
    """Return the vector norm of every column of ``a``.

    :param a:   2-D array whose columns are measured.
    :param ord: Norm order forwarded to ``np.linalg.norm`` (default 2).
    :return:    1-D array of per-column norms.
    """
    def column_norm(column):
        return np.linalg.norm(column, ord=ord)

    # Axis 0 walks down each column, so the helper sees one column at a time.
    return np.apply_along_axis(column_norm, 0, a)


def eig_residul(a, b, x, v, rel=True):
    """Residual norms of the generalized eigenpairs (x[i], v[:, i]).

    Computes ``||A v_i - x_i B v_i||`` for each eigenvector column; when
    ``rel`` is true, each residual is divided by
    ``||A v_i|| + |x_i| * ||B v_i||``.

    NOTE(review): the name looks like a typo for "eig_residual", but it is
    the public identifier, so it is kept unchanged.

    :param a:   Left-hand matrix A.
    :param b:   Right-hand matrix B of the pencil (A, B).
    :param x:   1-D array of eigenvalues, one per column of ``v``.
    :param v:   Matrix whose columns are the eigenvectors.
    :param rel: If true, return relative residuals; otherwise absolute.
    :return:    1-D array of residual norms, one per eigenpair.
    """
    lhs = a @ v
    rhs = b @ v
    residual = norm_along_column(lhs - x * rhs)
    if not rel:
        return residual
    scale = norm_along_column(lhs) + np.abs(x) * norm_along_column(rhs)
    return residual / scale
/*
 * Copyright (c) 2014
 *
 * This file is licensed under the Affero General Public License version 3
 * or later.
 *
 * See the COPYING-README file.
 *
 */

(function() {
	OC.Update = {
		// Guard so a second click cannot start a concurrent upgrade run.
		_started : false,

		/**
		 * Start the upgrade process.
		 *
		 * Streams progress events from core/ajax/update.php via an
		 * EventSource and appends one line per event to the progress
		 * element.
		 *
		 * @param $el progress list element
		 * @param options map with optional `productName` and `version`,
		 *                used only for the initial status message
		 */
		start: function($el, options) {
			if (this._started) {
				return;
			}

			// Set when at least one 'notice' event arrives; decides the
			// final message shown on 'done'.
			var hasWarnings = false;

			this.$el = $el;

			this._started = true;
			this.addMessage(t(
				'core',
				'Updating {productName} to version {version}, this may take a while.', {
					productName: options.productName || 'ownCloud',
					version: options.version
				}),
				'bold'
			).append('<br />');
			// FIXME: these should be ul/li with CSS paddings!

			var updateEventSource = new OC.EventSource(OC.webroot+'/core/ajax/update.php');
			// Plain progress line for each completed step.
			updateEventSource.listen('success', function(message) {
				$('<span>').append(message).append('<br />').appendTo($el);
			});
			// Non-fatal problem: shown in error style, remembered for 'done'.
			updateEventSource.listen('notice', function(message) {
				$('<span>').addClass('error').append(message).append('<br />').appendTo($el);
				hasWarnings = true;
			});
			// Fatal error: show it, ask for a reload, stop listening.
			updateEventSource.listen('error', function(message) {
				$('<span>').addClass('error').append(message).append('<br />').appendTo($el);
				message = t('core', 'Please reload the page.');
				$('<span>').addClass('error').append(message).append('<br />').appendTo($el);
				updateEventSource.close();
			});
			// Upgrade aborted server-side: point the user at the issue tracker.
			updateEventSource.listen('failure', function(message) {
				$('<span>').addClass('error').append(message).append('<br />').appendTo($el);
				$('<span>')
					.addClass('bold')
					.append(t('core', 'The update was unsuccessful. ' +
						'Please report this issue to the ' +
						'<a href="https://github.com/owncloud/core/issues" target="_blank">ownCloud community</a>.'))
					.appendTo($el);
			});
			// Upgrade finished: either ask for a manual reload (warnings)
			// or redirect back to the web UI after a short delay.
			updateEventSource.listen('done', function() {
				if (hasWarnings) {
					$('<span>').addClass('bold')
						.append('<br />')
						.append(t('core', 'The update was successful. There were warnings.'))
						.appendTo($el);
					var message = t('core', 'Please reload the page.');
					$('<span>').append('<br />').append(message).append('<br />').appendTo($el);
				} else {
					// FIXME: use product name
					$('<span>').addClass('bold')
						.append('<br />')
						.append(t('core', 'The update was successful. Redirecting you to ownCloud now.'))
						.appendTo($el);
					setTimeout(function () {
						OC.redirect(OC.webroot);
					}, 3000);
				}
			});
		},

		/**
		 * Append one message line to the progress element.
		 *
		 * @param message text or HTML fragment to append
		 * @param className CSS class applied to the new span (e.g. 'bold')
		 * @return the created jQuery span, so callers can chain on it
		 */
		addMessage: function(message, className) {
			var $span = $('<span>');
			$span.addClass(className).append(message).append('<br />').appendTo(this.$el);
			return $span;
		}
	};

})();

// Wire the update button: swap the overview for the progress list and
// kick off the upgrade with metadata read from data-* attributes.
$(document).ready(function() {
	$('.updateButton').on('click', function() {
		var $updateEl = $('.update');
		var $progressEl = $('.updateProgress');
		$progressEl.removeClass('hidden');
		$('.updateOverview').addClass('hidden');
		OC.Update.start($progressEl, {
			productName: $updateEl.attr('data-productname'),
			version: $updateEl.attr('data-version'),
		});
		return false;
	});
});
'''
Created on 05.11.2013

@author: gena
'''
from __future__ import print_function
from PyQt4 import QtCore
from escore.plate import Plate
from escore.approximations import indexByName


class PlateRecord(object):
    # Lightweight value holder pairing a Plate with its display name and
    # the directory it was loaded from / saved to.
    def __init__(self, plate, name, path):
        self.plate = plate
        self.name = name
        self.path = path


class PlateManager(QtCore.QObject):
    '''
    PlateManager holds all plates, and handles related actions, such as
    plate open, save, close, select, etc
    '''
    # Emitted with the full list of plate names whenever the list changes.
    signalPlateListUpdated = QtCore.pyqtSignal(QtCore.QStringList)
    # Emitted with the newly selected Plate (or None when deselected).
    signalCurrentPlateSet = QtCore.pyqtSignal(object)
    # Emitted with the new current index after a selection change.
    signalCurrentIndexChanged = QtCore.pyqtSignal(int)
    # Emitted with the approximation index when the approximation changes.
    signalApproximationSelected = QtCore.pyqtSignal(int)

    def __init__(self, parent=None):
        super(PlateManager, self).__init__(parent)
        self.plates = []
        # -1 means "no plate selected".
        self.currentPlateIndex = -1
        self.defaultApproximationIndex = 0

    def getFileInfo(self, fileName):
        """Split a file name into (base name, containing directory)."""
        fileInfo = QtCore.QFileInfo(fileName)
        return fileInfo.baseName(), fileInfo.dir()

    def openPlate(self, fileName):
        """Load all plates from one file, register them and select the first.

        Files holding several plates get records named "<base>_<n>".
        """
        plates = Plate.loadFromFile(fileName)
        for number, plate in enumerate(plates):
            plate.setParent(self)
            if plate.approximation is None:
                print("set default approximation for plate",
                      self.defaultApproximationIndex)
                plate.setApproximation(self.defaultApproximationIndex)
            name, path = self.getFileInfo(fileName)
            if len(plates) > 1:
                # Disambiguate multiple plates from the same file (1-based).
                name += '_' + str(number + 1)
            plateRecord = PlateRecord(plate, name, path)
            self.plates.append(plateRecord)
            # Propagate reference changes from any plate to all the others.
            plate.signalApplyReference.connect(self.applyReference)
        # NOTE(review): statement nesting reconstructed from a whitespace-
        # mangled source; the list update/selection is assumed to happen
        # once per file, after the loop — confirm against upstream.
        self.signalPlateListUpdated.emit(self.names())
        if not self.isEmpty():
            self.setCurrentPlate(0)

    def setApproximation(self, index):
        """Set the default approximation and apply it to the current plate."""
        if self.defaultApproximationIndex == index:
            return
        self.defaultApproximationIndex = index
        if self.currentPlateIndex >= 0:
            self.plates[self.currentPlateIndex].plate.setApproximation(index)
        # NOTE(review): assumed to fire regardless of whether a plate is
        # selected (indentation was lost in the source) — confirm.
        self.signalApproximationSelected.emit(index)

    def openPlates(self, fileNameList):
        """Open every file in the given list."""
        for fileName in fileNameList:
            self.openPlate(fileName)

    def savePlateAs(self, fileName):
        """Save the current plate under a new name and update its record."""
        if self.currentPlateIndex < 0:
            return
        plateRecord = self.plates[self.currentPlateIndex]
        plateRecord.plate.saveToFile(fileName)
        plateRecord.name, plateRecord.path = self.getFileInfo(fileName)
        self.signalPlateListUpdated.emit(self.names())

    def savePlateWithDefaultName(self, index):
        """Save the plate at ``index`` to "<path>/<name>.csv"."""
        plateRecord = self.plates[index]
        fileInfo = QtCore.QFileInfo(plateRecord.path, plateRecord.name + '.csv')
        plateRecord.plate.saveToFile(fileInfo.filePath())

    def savePlate(self):
        """Save the currently selected plate with its default name."""
        if self.currentPlateIndex < 0:
            return
        self.savePlateWithDefaultName(self.currentPlateIndex)

    def saveAllPlates(self):
        """Save every managed plate with its default name."""
        for index in range(len(self.plates)):
            self.savePlateWithDefaultName(index)

    def removePlate(self):
        """Remove the currently selected plate and reselect plate 0.

        NOTE(review): after deletion ``currentPlateIndex`` still holds the
        old value; when it was already 0, ``setCurrentPlate(0)`` returns
        early without re-emitting the selection signals — verify this is
        the intended behaviour.
        """
        if self.currentPlateIndex < 0:
            return
        self.signalCurrentPlateSet.emit(None)
        self.plates[self.currentPlateIndex].plate.signalApplyReference.disconnect()
        del self.plates[self.currentPlateIndex]
        self.signalPlateListUpdated.emit(self.names())
        if not self.isEmpty():
            self.setCurrentPlate(0)

    def isDirty(self):
        # NOTE(review): raises IndexError when no plate is selected —
        # callers appear to check emptiness/selection first.
        return self.plates[self.currentPlateIndex].plate.dirty

    def isEmpty(self):
        return self.plates == []

    def names(self):
        """Return the plate names as a QStringList for the GUI."""
        return QtCore.QStringList(
            [QtCore.QString(record.name) for record in self.plates])

    def setCurrentPlate(self, index):
        """Select the plate at ``index`` (or deselect with a negative index).

        Also syncs the default approximation to the selected plate's one
        and notifies listeners of both the index and the plate object.
        """
        if self.currentPlateIndex == index:
            return
        self.currentPlateIndex = index
        if index >= 0:
            plate = self.plates[index].plate
            appindex = indexByName(plate.approximation.name)
            self.defaultApproximationIndex = appindex
            self.signalApproximationSelected.emit(appindex)
        else:
            plate = None
        self.signalCurrentIndexChanged.emit(self.currentPlateIndex)
        self.signalCurrentPlateSet.emit(plate)

    def applyReference(self, reference):
        """Apply a reference to every plate except the one that emitted it."""
        print('Applying reference to all plates')
        sender = self.sender()
        for plateRecord in self.plates:
            plate = plateRecord.plate
            if not plate is sender:
                plate.setReference(reference)
<|file_name|>common.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /** Code that is useful in various trans modules. */ use back::{abi, upcall}; use driver::session; use driver::session::Session; use lib::llvm::{ModuleRef, ValueRef, TypeRef, BasicBlockRef, BuilderRef}; use lib::llvm::{True, False, Bool}; use lib::llvm::{llvm, TargetData, TypeNames, associate_type, name_has_type}; use lib; use metadata::common::LinkMeta; use middle::astencode; use middle::resolve; use middle::trans::adt; use middle::trans::base; use middle::trans::build; use middle::trans::datum; use middle::trans::debuginfo; use middle::trans::glue; use middle::trans::reachable; use middle::trans::shape; use middle::trans::type_of; use middle::trans::type_use; use middle::trans::write_guard; use middle::ty::substs; use middle::ty; use middle::typeck; use middle::borrowck::root_map_key; use util::ppaux::{Repr}; use core::cast::transmute; use core::hash; use core::hashmap::{HashMap, HashSet}; use core::libc::{c_uint, c_longlong, c_ulonglong}; use core::to_bytes; use core::vec::raw::to_ptr; use syntax::ast::ident; use syntax::ast_map::{path, path_elt}; use syntax::codemap::span; use syntax::parse::token::ident_interner; use syntax::{ast, ast_map}; use syntax::abi::{X86, X86_64, Arm, Mips}; pub type namegen = @fn(s: &str) -> ident; pub fn new_namegen(intr: @ident_interner) -> namegen { let f: @fn(s: &str) -> ident = |prefix| { intr.gensym(fmt!("%s_%u", prefix, intr.gensym(prefix).repr)) }; f } pub type addrspace = c_uint; // Address spaces communicate to 
LLVM which destructors need to run for // specific types. // 0 is ignored by the GC, and is used for all non-GC'd pointers. // 1 is for opaque GC'd boxes. // >= 2 are for specific types (e.g. resources). pub static default_addrspace: addrspace = 0; pub static gc_box_addrspace: addrspace = 1; pub type addrspace_gen = @fn() -> addrspace; pub fn new_addrspace_gen() -> addrspace_gen { let i = @mut 1; let result: addrspace_gen = || { *i += 1; *i }; result } pub struct tydesc_info { ty: ty::t, tydesc: ValueRef, size: ValueRef, align: ValueRef, addrspace: addrspace, take_glue: Option<ValueRef>, drop_glue: Option<ValueRef>, free_glue: Option<ValueRef>, visit_glue: Option<ValueRef> } /* * A note on nomenclature of linking: "extern", "foreign", and "upcall". * * An "extern" is an LLVM symbol we wind up emitting an undefined external * reference to. This means "we don't have the thing in this compilation unit, * please make sure you link it in at runtime". This could be a reference to * C code found in a C library, or rust code found in a rust crate. * * Most "externs" are implicitly declared (automatically) as a result of a * user declaring an extern _module_ dependency; this causes the rust driver * to locate an extern crate, scan its compilation metadata, and emit extern * declarations for any symbols used by the declaring crate. * * A "foreign" is an extern that references C (or other non-rust ABI) code. * There is no metadata to scan for extern references so in these cases either * a header-digester like bindgen, or manual function prototypes, have to * serve as declarators. So these are usually given explicitly as prototype * declarations, in rust code, with ABI attributes on them noting which ABI to * link via. * * An "upcall" is a foreign call generated by the compiler (not corresponding * to any user-written call in the code) into the runtime library, to perform * some helper task such as bringing a task to life, allocating memory, etc. 
* */ pub struct Stats { n_static_tydescs: uint, n_glues_created: uint, n_null_glues: uint, n_real_glues: uint, n_fns: uint, n_monos: uint, n_inlines: uint, n_closures: uint, llvm_insn_ctxt: @mut ~[~str], llvm_insns: @mut HashMap<~str, uint>, fn_times: @mut ~[(~str, int)] // (ident, time) } pub struct BuilderRef_res { B: BuilderRef, } impl Drop for BuilderRef_res { fn finalize(&self) { unsafe { llvm::LLVMDisposeBuilder(self.B); } } } pub fn BuilderRef_res(B: BuilderRef) -> BuilderRef_res { BuilderRef_res { B: B } } pub type ExternMap = @mut HashMap<@str, ValueRef>; // Crate context. Every crate we compile has one of these. pub struct CrateContext { sess: session::Session, llmod: ModuleRef, td: TargetData, tn: @TypeNames, externs: ExternMap, intrinsics: HashMap<~str, ValueRef>, item_vals: @mut HashMap<ast::node_id, ValueRef>, exp_map2: resolve::ExportMap2, reachable: reachable::map, item_symbols: @mut HashMap<ast::node_id, ~str>, link_meta: LinkMeta, enum_sizes: @mut HashMap<ty::t, uint>, discrims: @mut HashMap<ast::def_id, ValueRef>, discrim_symbols: @mut HashMap<ast::node_id, @~str>, tydescs: @mut HashMap<ty::t, @mut tydesc_info>, // Set when running emit_tydescs to enforce that no more tydescs are // created. finished_tydescs: @mut bool, // Track mapping of external ids to local items imported for inlining external: @mut HashMap<ast::def_id, Option<ast::node_id>>, // Cache instances of monomorphized functions monomorphized: @mut HashMap<mono_id, ValueRef>, monomorphizing: @mut HashMap<ast::def_id, uint>, // Cache computed type parameter uses (see type_use.rs) type_use_cache: @mut HashMap<ast::def_id, @~[type_use::type_uses]>, // Cache generated vtables vtables: @mut HashMap<mono_id, ValueRef>, // Cache of constant strings, const_cstr_cache: @mut HashMap<@~str, ValueRef>, // Reverse-direction for const ptrs cast from globals. // Key is an int, cast from a ValueRef holding a *T, // Val is a ValueRef holding a *[T]. 
// // Needed because LLVM loses pointer->pointee association // when we ptrcast, and we have to ptrcast during translation // of a [T] const because we form a slice, a [*T,int] pair, not // a pointer to an LLVM array type. const_globals: @mut HashMap<int, ValueRef>, // Cache of emitted const values const_values: @mut HashMap<ast::node_id, ValueRef>, // Cache of external const values extern_const_values: @mut HashMap<ast::def_id, ValueRef>, module_data: @mut HashMap<~str, ValueRef>, lltypes: @mut HashMap<ty::t, TypeRef>, llsizingtypes: @mut HashMap<ty::t, TypeRef>, adt_reprs: @mut HashMap<ty::t, @adt::Repr>, names: namegen, next_addrspace: addrspace_gen, symbol_hasher: @mut hash::State, type_hashcodes: @mut HashMap<ty::t, @str>, type_short_names: @mut HashMap<ty::t, ~str>, all_llvm_symbols: @mut HashSet<@~str>, tcx: ty::ctxt, maps: astencode::Maps, stats: @mut Stats, upcalls: @upcall::Upcalls, tydesc_type: TypeRef, int_type: TypeRef, float_type: TypeRef, task_type: TypeRef, opaque_vec_type: TypeRef, builder: BuilderRef_res, shape_cx: shape::Ctxt, crate_map: ValueRef, // Set when at least one function uses GC. Needed so that // decl_gc_metadata knows whether to link to the module metadata, which // is not emitted by LLVM's GC pass when no functions use GC. uses_gc: @mut bool, dbg_cx: Option<debuginfo::DebugContext>, do_not_commit_warning_issued: @mut bool } // Types used for llself. pub struct ValSelfData { v: ValueRef, t: ty::t, is_owned: bool } pub enum local_val { local_mem(ValueRef), local_imm(ValueRef), } // Here `self_ty` is the real type of the self parameter to this method. It // will only be set in the case of default methods. 
// The set of substitutions in force while translating a monomorphized
// (or default-method) body: the concrete types chosen for the type
// parameters, the resolved vtables for their trait bounds, the type
// parameter definitions themselves, and -- for default methods only --
// the concrete self type.
pub struct param_substs {
    tys: ~[ty::t],
    vtables: Option<typeck::vtable_res>,
    type_param_defs: @~[ty::TypeParameterDef],
    self_ty: Option<ty::t>
}

pub impl param_substs {
    // Sanity check: by the time we reach trans, no inference variables
    // may remain in the substituted types.
    fn validate(&self) {
        for self.tys.each |t| { assert!(!ty::type_needs_infer(*t)); }
        for self.self_ty.each |t| { assert!(!ty::type_needs_infer(*t)); }
    }
}

fn param_substs_to_str(this: &param_substs, tcx: ty::ctxt) -> ~str {
    fmt!("param_substs {tys:%s, vtables:%s, type_param_defs:%s}",
         this.tys.repr(tcx),
         this.vtables.repr(tcx),
         this.type_param_defs.repr(tcx))
}

impl Repr for param_substs {
    fn repr(&self, tcx: ty::ctxt) -> ~str {
        param_substs_to_str(self, tcx)
    }
}

impl Repr for @param_substs {
    fn repr(&self, tcx: ty::ctxt) -> ~str {
        param_substs_to_str(*self, tcx)
    }
}

// Function context.  Every LLVM function we create will have one of
// these.
pub struct fn_ctxt_ {
    // The ValueRef returned from a call to llvm::LLVMAddFunction; the
    // address of the first instruction in the sequence of
    // instructions for this function that will go in the .text
    // section of the executable we're generating.
    llfn: ValueRef,

    // The implicit environment argument that arrives in the function we're
    // creating.
    llenv: ValueRef,

    // The place to store the return value. If the return type is immediate,
    // this is an alloca in the function. Otherwise, it's the hidden first
    // parameter to the function. After function construction, this should
    // always be Some.
    llretptr: Option<ValueRef>,

    // These elements: "hoisted basic blocks" containing
    // administrative activities that have to happen in only one place in
    // the function, due to LLVM's quirks.

    // A block for all the function's static allocas, so that LLVM
    // will coalesce them into a single alloca call.
    llstaticallocas: BasicBlockRef,

    // A block containing code that copies incoming arguments to space
    // already allocated by code in one of the llallocas blocks.
    // (LLVM requires that arguments be copied to local allocas before
    // allowing most any operation to be performed on them.)
    llloadenv: Option<BasicBlockRef>,

    // The block to which control flows on return; holds the epilogue.
    llreturn: BasicBlockRef,

    // The 'self' value currently in use in this function, if there
    // is one.
    //
    // NB: This is the type of the self *variable*, not the self *type*. The
    // self type is set only for default methods, while the self variable is
    // set for all methods.
    llself: Option<ValSelfData>,

    // A value alloca'd for calls to upcalls.rust_personality. Used when
    // outputting the resume instruction.
    personality: Option<ValueRef>,

    // If this is a for-loop body that returns, this holds the pointers needed
    // for that (flagptr, retptr)
    loop_ret: Option<(ValueRef, ValueRef)>,

    // True if this function has an immediate return value, false otherwise.
    // If this is false, the llretptr will alias the first argument of the
    // function.
    has_immediate_return_value: bool,

    // Maps arguments to allocas created for them in llallocas.
    llargs: @mut HashMap<ast::node_id, local_val>,

    // Maps the def_ids for local variables to the allocas created for
    // them in llallocas.
    lllocals: @mut HashMap<ast::node_id, local_val>,

    // Same as above, but for closure upvars
    llupvars: @mut HashMap<ast::node_id, ValueRef>,

    // The node_id of the function, or -1 if it doesn't correspond to
    // a user-defined function.
    id: ast::node_id,

    // The def_id of the impl we're inside, or None if we aren't inside one.
    impl_id: Option<ast::def_id>,

    // If this function is being monomorphized, this contains the type
    // substitutions used.
    param_substs: Option<@param_substs>,

    // The source span and nesting context where this function comes from, for
    // error reporting and symbol generation.
    span: Option<span>,
    path: path,

    // This function's enclosing crate context.
    // NOTE(review): the double `@@` indirection appears intentional -- the
    // accessor below derefs it with `*self.fcx.ccx` -- but confirm before
    // changing.
    ccx: @@CrateContext
}

pub type fn_ctxt = @mut fn_ctxt_;

// Emit the "do not commit" warning at most once per crate context.
pub fn warn_not_to_commit(ccx: @CrateContext, msg: &str) {
    if !*ccx.do_not_commit_warning_issued {
        *ccx.do_not_commit_warning_issued = true;
        ccx.sess.warn(msg.to_str() + ~" -- do not commit like this!");
    }
}

// Heap selectors. Indicate which heap something should go on.
#[deriving(Eq)]
pub enum heap {
    heap_managed,
    heap_managed_unique,
    heap_exchange,
}

// Whether a cleanup runs only on normal scope exit, or also during
// unwinding.
#[deriving(Eq)]
pub enum cleantype {
    normal_exit_only,
    normal_exit_and_unwind
}

// A scheduled cleanup: either a bare action, or an action tied to a
// specific temporary value (so it can be revoked via revoke_clean).
pub enum cleanup {
    clean(@fn(block) -> block, cleantype),
    clean_temp(ValueRef, @fn(block) -> block, cleantype),
}

// Used to remember and reuse existing cleanup paths
// target: none means the path ends in a resume instruction
pub struct cleanup_path {
    target: Option<BasicBlockRef>,
    dest: BasicBlockRef
}

// Invalidate cached cleanup paths and the landing pad after the set of
// cleanups for a scope changes.
pub fn scope_clean_changed(scope_info: &mut scope_info) {
    if scope_info.cleanup_paths.len() > 0u { scope_info.cleanup_paths = ~[]; }
    scope_info.landing_pad = None;
}

pub fn cleanup_type(cx: ty::ctxt, ty: ty::t) -> cleantype {
    if ty::type_needs_unwind_cleanup(cx, ty) {
        normal_exit_and_unwind
    } else {
        normal_exit_only
    }
}

// This is not the same as datum::Datum::root(), which is used to keep copies
// of @ values live for as long as a borrowed pointer to the interior exists.
// In the new GC, we can identify immediates on the stack without difficulty,
// but have trouble knowing where non-immediates are on the stack. For
// non-immediates, we must add an additional level of indirection, which
// allows us to alloca a pointer with the right addrspace.
pub fn root_for_cleanup(bcx: block, v: ValueRef, t: ty::t) -> (ValueRef, bool) { let ccx = bcx.ccx(); let addrspace = base::get_tydesc(ccx, t).addrspace; if addrspace > gc_box_addrspace { let llty = type_of::type_of_rooted(ccx, t); let root = base::alloca(bcx, llty); build::Store(bcx, build::PointerCast(bcx, v, llty), root); (root, true) } else { (v, false) } } pub fn add_clean(bcx: block, val: ValueRef, t: ty::t) { if !ty::type_needs_drop(bcx.tcx(), t) { return; } debug!("add_clean(%s, %s, %s)", bcx.to_str(), val_str(bcx.ccx().tn, val), t.repr(bcx.tcx())); let (root, rooted) = root_for_cleanup(bcx, val, t); let cleanup_type = cleanup_type(bcx.tcx(), t); do in_scope_cx(bcx) |scope_info| { scope_info.cleanups.push( clean(|a| glue::drop_ty_root(a, root, rooted, t), cleanup_type)); scope_clean_changed(scope_info); } } pub fn add_clean_temp_immediate(cx: block, val: ValueRef, ty: ty::t) { if !ty::type_needs_drop(cx.tcx(), ty) { return; } debug!("add_clean_temp_immediate(%s, %s, %s)", cx.to_str(), val_str(cx.ccx().tn, val), ty.repr(cx.tcx()));<|fim▁hole|> do in_scope_cx(cx) |scope_info| { scope_info.cleanups.push( clean_temp(val, |a| glue::drop_ty_immediate(a, val, ty), cleanup_type)); scope_clean_changed(scope_info); } } pub fn add_clean_temp_mem(bcx: block, val: ValueRef, t: ty::t) { if !ty::type_needs_drop(bcx.tcx(), t) { return; } debug!("add_clean_temp_mem(%s, %s, %s)", bcx.to_str(), val_str(bcx.ccx().tn, val), t.repr(bcx.tcx())); let (root, rooted) = root_for_cleanup(bcx, val, t); let cleanup_type = cleanup_type(bcx.tcx(), t); do in_scope_cx(bcx) |scope_info| { scope_info.cleanups.push( clean_temp(val, |a| glue::drop_ty_root(a, root, rooted, t), cleanup_type)); scope_clean_changed(scope_info); } } pub fn add_clean_return_to_mut(bcx: block, root_key: root_map_key, frozen_val_ref: ValueRef, bits_val_ref: ValueRef, filename_val: ValueRef, line_val: ValueRef) { //! When an `@mut` has been frozen, we have to //! call the lang-item `return_to_mut` when the //! 
freeze goes out of scope. We need to pass //! in both the value which was frozen (`frozen_val`) and //! the value (`bits_val_ref`) which was returned when the //! box was frozen initially. Here, both `frozen_val_ref` and //! `bits_val_ref` are in fact pointers to stack slots. debug!("add_clean_return_to_mut(%s, %s, %s)", bcx.to_str(), val_str(bcx.ccx().tn, frozen_val_ref), val_str(bcx.ccx().tn, bits_val_ref)); do in_scope_cx(bcx) |scope_info| { scope_info.cleanups.push( clean_temp( frozen_val_ref, |bcx| write_guard::return_to_mut(bcx, root_key, frozen_val_ref, bits_val_ref, filename_val, line_val), normal_exit_only)); scope_clean_changed(scope_info); } } pub fn add_clean_free(cx: block, ptr: ValueRef, heap: heap) { let free_fn = match heap { heap_managed | heap_managed_unique => { let f: @fn(block) -> block = |a| glue::trans_free(a, ptr); f } heap_exchange => { let f: @fn(block) -> block = |a| glue::trans_exchange_free(a, ptr); f } }; do in_scope_cx(cx) |scope_info| { scope_info.cleanups.push(clean_temp(ptr, free_fn, normal_exit_and_unwind)); scope_clean_changed(scope_info); } } // Note that this only works for temporaries. We should, at some point, move // to a system where we can also cancel the cleanup on local variables, but // this will be more involved. For now, we simply zero out the local, and the // drop glue checks whether it is zero. 
pub fn revoke_clean(cx: block, val: ValueRef) { do in_scope_cx(cx) |scope_info| { let scope_info = &mut *scope_info; // FIXME(#5074) workaround borrowck let cleanup_pos = vec::position( scope_info.cleanups, |cu| match *cu { clean_temp(v, _, _) if v == val => true, _ => false }); for cleanup_pos.each |i| { scope_info.cleanups = vec::append(vec::slice(scope_info.cleanups, 0u, *i).to_vec(), vec::slice(scope_info.cleanups, *i + 1u, scope_info.cleanups.len())); scope_clean_changed(scope_info); } } } pub fn block_cleanups(bcx: block) -> ~[cleanup] { match bcx.kind { block_non_scope => ~[], block_scope(inf) => /*bad*/copy inf.cleanups } } pub enum block_kind { // A scope at the end of which temporary values created inside of it are // cleaned up. May correspond to an actual block in the language, but also // to an implicit scope, for example, calls introduce an implicit scope in // which the arguments are evaluated and cleaned up. block_scope(@mut scope_info), // A non-scope block is a basic block created as a translation artifact // from translating code that expresses conditional logic rather than by // explicit { ... } block structure in the source language. It's called a // non-scope block because it doesn't introduce a new variable scope. block_non_scope, } pub struct scope_info { loop_break: Option<block>, loop_label: Option<ident>, // A list of functions that must be run at when leaving this // block, cleaning up any variables that were introduced in the // block. cleanups: ~[cleanup], // Existing cleanup paths that may be reused, indexed by destination and // cleared when the set of cleanups changes. cleanup_paths: ~[cleanup_path], // Unwinding landing pad. Also cleared when cleanups change. 
landing_pad: Option<BasicBlockRef>, } pub impl scope_info { fn empty_cleanups(&mut self) -> bool { self.cleanups.is_empty() } } pub trait get_node_info { fn info(&self) -> Option<NodeInfo>; } impl get_node_info for @ast::expr { fn info(&self) -> Option<NodeInfo> { Some(NodeInfo {id: self.id, callee_id: Some(self.callee_id), span: self.span}) } } impl get_node_info for ast::blk { fn info(&self) -> Option<NodeInfo> { Some(NodeInfo {id: self.node.id, callee_id: None, span: self.span}) } } impl get_node_info for Option<@ast::expr> { fn info(&self) -> Option<NodeInfo> { self.chain_ref(|s| s.info()) } } pub struct NodeInfo { id: ast::node_id, callee_id: Option<ast::node_id>, span: span } // Basic block context. We create a block context for each basic block // (single-entry, single-exit sequence of instructions) we generate from Rust // code. Each basic block we generate is attached to a function, typically // with many basic blocks per function. All the basic blocks attached to a // function are organized as a directed graph. pub struct block_ { // The BasicBlockRef returned from a call to // llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic // block to the function pointed to by llfn. We insert // instructions into that block by way of this block context. // The block pointing to this one in the function's digraph. llbb: BasicBlockRef, terminated: bool, unreachable: bool, parent: Option<block>, // The 'kind' of basic block this is. kind: block_kind, // Is this block part of a landing pad? is_lpad: bool, // info about the AST node this block originated from, if any node_info: Option<NodeInfo>, // The function context for the function to which this block is // attached. 
fcx: fn_ctxt } pub fn block_(llbb: BasicBlockRef, parent: Option<block>, kind: block_kind, is_lpad: bool, node_info: Option<NodeInfo>, fcx: fn_ctxt) -> block_ { block_ { llbb: llbb, terminated: false, unreachable: false, parent: parent, kind: kind, is_lpad: is_lpad, node_info: node_info, fcx: fcx } } pub type block = @mut block_; pub fn mk_block(llbb: BasicBlockRef, parent: Option<block>, kind: block_kind, is_lpad: bool, node_info: Option<NodeInfo>, fcx: fn_ctxt) -> block { @mut block_(llbb, parent, kind, is_lpad, node_info, fcx) } // First two args are retptr, env pub static first_real_arg: uint = 2u; pub struct Result { bcx: block, val: ValueRef } pub fn rslt(bcx: block, val: ValueRef) -> Result { Result {bcx: bcx, val: val} } pub impl Result { fn unpack(&self, bcx: &mut block) -> ValueRef { *bcx = self.bcx; return self.val; } } pub fn ty_str(tn: @TypeNames, t: TypeRef) -> @str { return lib::llvm::type_to_str(tn, t); } pub fn val_ty(v: ValueRef) -> TypeRef { unsafe { return llvm::LLVMTypeOf(v); } } pub fn val_str(tn: @TypeNames, v: ValueRef) -> @str { return ty_str(tn, val_ty(v)); } pub fn in_scope_cx(cx: block, f: &fn(si: @mut scope_info)) { let mut cur = cx; loop { match cur.kind { block_scope(inf) => { debug!("in_scope_cx: selected cur=%s (cx=%s)", cur.to_str(), cx.to_str()); f(inf); return; } _ => () } cur = block_parent(cur); } } pub fn block_parent(cx: block) -> block { match cx.parent { Some(b) => b, None => cx.sess().bug(fmt!("block_parent called on root block %?", cx)) } } // Accessors pub impl block_ { fn ccx(@mut self) -> @CrateContext { *self.fcx.ccx } fn tcx(@mut self) -> ty::ctxt { self.fcx.ccx.tcx } fn sess(@mut self) -> Session { self.fcx.ccx.sess } fn node_id_to_str(@mut self, id: ast::node_id) -> ~str { ast_map::node_id_to_str(self.tcx().items, id, self.sess().intr()) } fn expr_to_str(@mut self, e: @ast::expr) -> ~str { e.repr(self.tcx()) } fn expr_is_lval(@mut self, e: @ast::expr) -> bool { ty::expr_is_lval(self.tcx(), 
self.ccx().maps.method_map, e) } fn expr_kind(@mut self, e: @ast::expr) -> ty::ExprKind { ty::expr_kind(self.tcx(), self.ccx().maps.method_map, e) } fn def(@mut self, nid: ast::node_id) -> ast::def { match self.tcx().def_map.find(&nid) { Some(&v) => v, None => { self.tcx().sess.bug(fmt!( "No def associated with node id %?", nid)); } } } fn val_str(@mut self, val: ValueRef) -> @str { val_str(self.ccx().tn, val) } fn llty_str(@mut self, llty: TypeRef) -> @str { ty_str(self.ccx().tn, llty) } fn ty_to_str(@mut self, t: ty::t) -> ~str { t.repr(self.tcx()) } fn to_str(@mut self) -> ~str { unsafe { match self.node_info { Some(node_info) => fmt!("[block %d]", node_info.id), None => fmt!("[block %x]", transmute(&*self)), } } } } // LLVM type constructors. pub fn T_void() -> TypeRef { unsafe { return llvm::LLVMVoidType(); } } pub fn T_nil() -> TypeRef { return T_struct(~[], false) } pub fn T_metadata() -> TypeRef { unsafe { return llvm::LLVMMetadataType(); } } pub fn T_i1() -> TypeRef { unsafe { return llvm::LLVMInt1Type(); } } pub fn T_i8() -> TypeRef { unsafe { return llvm::LLVMInt8Type(); } } pub fn T_i16() -> TypeRef { unsafe { return llvm::LLVMInt16Type(); } } pub fn T_i32() -> TypeRef { unsafe { return llvm::LLVMInt32Type(); } } pub fn T_i64() -> TypeRef { unsafe { return llvm::LLVMInt64Type(); } } pub fn T_f32() -> TypeRef { unsafe { return llvm::LLVMFloatType(); } } pub fn T_f64() -> TypeRef { unsafe { return llvm::LLVMDoubleType(); } } pub fn T_bool() -> TypeRef { return T_i8(); } pub fn T_int(targ_cfg: @session::config) -> TypeRef { return match targ_cfg.arch { X86 => T_i32(), X86_64 => T_i64(), Arm => T_i32(), Mips => T_i32() }; } pub fn T_int_ty(cx: @CrateContext, t: ast::int_ty) -> TypeRef { match t { ast::ty_i => cx.int_type, ast::ty_char => T_char(), ast::ty_i8 => T_i8(), ast::ty_i16 => T_i16(), ast::ty_i32 => T_i32(), ast::ty_i64 => T_i64() } } pub fn T_uint_ty(cx: @CrateContext, t: ast::uint_ty) -> TypeRef { match t { ast::ty_u => cx.int_type, ast::ty_u8 => 
T_i8(), ast::ty_u16 => T_i16(), ast::ty_u32 => T_i32(), ast::ty_u64 => T_i64() } } pub fn T_float_ty(cx: @CrateContext, t: ast::float_ty) -> TypeRef { match t { ast::ty_f => cx.float_type, ast::ty_f32 => T_f32(), ast::ty_f64 => T_f64() } } pub fn T_float(targ_cfg: @session::config) -> TypeRef { return match targ_cfg.arch { X86 => T_f64(), X86_64 => T_f64(), Arm => T_f64(), Mips => T_f64() }; } pub fn T_char() -> TypeRef { return T_i32(); } pub fn T_size_t(targ_cfg: @session::config) -> TypeRef { return T_int(targ_cfg); } pub fn T_fn(inputs: &[TypeRef], output: TypeRef) -> TypeRef { unsafe { return llvm::LLVMFunctionType(output, to_ptr(inputs), inputs.len() as c_uint, False); } } pub fn T_fn_pair(cx: @CrateContext, tfn: TypeRef) -> TypeRef { return T_struct(~[T_ptr(tfn), T_opaque_cbox_ptr(cx)], false); } pub fn T_ptr(t: TypeRef) -> TypeRef { unsafe { return llvm::LLVMPointerType(t, default_addrspace); } } pub fn T_root(t: TypeRef, addrspace: addrspace) -> TypeRef { unsafe { return llvm::LLVMPointerType(t, addrspace); } } pub fn T_struct(elts: &[TypeRef], packed: bool) -> TypeRef { unsafe { return llvm::LLVMStructType(to_ptr(elts), elts.len() as c_uint, packed as Bool); } } pub fn T_named_struct(name: &str) -> TypeRef { unsafe { let c = llvm::LLVMGetGlobalContext(); return str::as_c_str(name, |buf| llvm::LLVMStructCreateNamed(c, buf)); } } pub fn set_struct_body(t: TypeRef, elts: &[TypeRef], packed: bool) { unsafe { llvm::LLVMStructSetBody(t, to_ptr(elts), elts.len() as c_uint, packed as Bool); } } pub fn T_empty_struct() -> TypeRef { return T_struct(~[], false); } // A vtable is, in reality, a vtable pointer followed by zero or more pointers // to tydescs and other vtables that it closes over. But the types and number // of those are rarely known to the code that needs to manipulate them, so // they are described by this opaque type. 
// Opaque vtable type: a one-element array of i8*, used wherever a vtable
// pointer is manipulated without knowing its real layout (see the comment
// above).
pub fn T_vtable() -> TypeRef { T_array(T_ptr(T_i8()), 1u) }

// Build the LLVM type for the runtime's task structure: eight int-sized
// fields, named below in order.
pub fn T_task(targ_cfg: @session::config) -> TypeRef {
    let t = T_named_struct(~"task");

    // Refcount
    // Delegate pointer
    // Stack segment pointer
    // Runtime SP
    // Rust SP
    // GC chain
    // Domain pointer
    // Crate cache pointer
    let t_int = T_int(targ_cfg);
    let elems =
        ~[t_int, t_int, t_int, t_int,
          t_int, t_int, t_int, t_int];
    set_struct_body(t, elems, false);
    return t;
}

// Return the LLVM type of field `field` of the tydesc struct, by asking
// LLVM for the element types of the already-built cx.tydesc_type.
pub fn T_tydesc_field(cx: @CrateContext, field: uint) -> TypeRef {
    // Bit of a kludge: pick the fn typeref out of the tydesc..
    unsafe {
        let mut tydesc_elts: ~[TypeRef] =
            vec::from_elem::<TypeRef>(abi::n_tydesc_fields, T_nil());
        llvm::LLVMGetStructElementTypes(
            cx.tydesc_type,
            ptr::to_mut_unsafe_ptr(&mut tydesc_elts[0]));
        let t = llvm::LLVMGetElementType(tydesc_elts[field]);
        return t;
    }
}

// The common type of all glue functions, derived from the drop-glue slot
// of the tydesc and memoized in the type-name table under "glue_fn".
pub fn T_generic_glue_fn(cx: @CrateContext) -> TypeRef {
    let s = @"glue_fn";
    // Return the cached type if we have already computed it.
    match name_has_type(cx.tn, s) {
        Some(t) => return t,
        _ => ()
    }
    let t = T_tydesc_field(cx, abi::tydesc_field_drop_glue);
    associate_type(cx.tn, s, t);
    return t;
}

// Build the named "tydesc" struct type: size and align (int), the four
// glue-function pointers, and two i8* fields.
pub fn T_tydesc(targ_cfg: @session::config) -> TypeRef {
    let tydesc = T_named_struct(~"tydesc");
    let tydescpp = T_ptr(T_ptr(tydesc));
    let pvoid = T_ptr(T_i8());
    let glue_fn_ty =
        T_ptr(T_fn(~[T_ptr(T_nil()), T_ptr(T_nil()), tydescpp, pvoid],
                   T_void()));

    let int_type = T_int(targ_cfg);
    let elems =
        ~[int_type, int_type,
          glue_fn_ty, glue_fn_ty, glue_fn_ty, glue_fn_ty,
          T_ptr(T_i8()), T_ptr(T_i8())];
    set_struct_body(tydesc, elems, false);
    return tydesc;
}

pub fn T_array(t: TypeRef, n: uint) -> TypeRef {
    unsafe {
        return llvm::LLVMArrayType(t, n as c_uint);
    }
}

pub fn T_vector(t: TypeRef, n: uint) -> TypeRef {
    unsafe {
        return llvm::LLVMVectorType(t, n as c_uint);
    }
}

// Interior vector.
pub fn T_vec2(targ_cfg: @session::config, t: TypeRef) -> TypeRef { return T_struct(~[T_int(targ_cfg), // fill T_int(targ_cfg), // alloc T_array(t, 0u)], // elements false); } pub fn T_vec(ccx: @CrateContext, t: TypeRef) -> TypeRef { return T_vec2(ccx.sess.targ_cfg, t); } // Note that the size of this one is in bytes. pub fn T_opaque_vec(targ_cfg: @session::config) -> TypeRef { return T_vec2(targ_cfg, T_i8()); } // Let T be the content of a box @T. tuplify_box_ty(t) returns the // representation of @T as a tuple (i.e., the ty::t version of what T_box() // returns). pub fn tuplify_box_ty(tcx: ty::ctxt, t: ty::t) -> ty::t { let ptr = ty::mk_ptr( tcx, ty::mt {ty: ty::mk_nil(), mutbl: ast::m_imm} ); return ty::mk_tup(tcx, ~[ty::mk_uint(), ty::mk_type(tcx), ptr, ptr, t]); } pub fn T_box_header_fields(cx: @CrateContext) -> ~[TypeRef] { let ptr = T_ptr(T_i8()); return ~[cx.int_type, T_ptr(cx.tydesc_type), ptr, ptr]; } pub fn T_box_header(cx: @CrateContext) -> TypeRef { return T_struct(T_box_header_fields(cx), false); } pub fn T_box(cx: @CrateContext, t: TypeRef) -> TypeRef { return T_struct(vec::append(T_box_header_fields(cx), ~[t]), false); } pub fn T_box_ptr(t: TypeRef) -> TypeRef { unsafe { return llvm::LLVMPointerType(t, gc_box_addrspace); } } pub fn T_opaque_box(cx: @CrateContext) -> TypeRef { return T_box(cx, T_i8()); } pub fn T_opaque_box_ptr(cx: @CrateContext) -> TypeRef { return T_box_ptr(T_opaque_box(cx)); } pub fn T_unique(cx: @CrateContext, t: TypeRef) -> TypeRef { return T_struct(vec::append(T_box_header_fields(cx), ~[t]), false); } pub fn T_unique_ptr(t: TypeRef) -> TypeRef { unsafe { return llvm::LLVMPointerType(t, gc_box_addrspace); } } pub fn T_port(cx: @CrateContext, _t: TypeRef) -> TypeRef { return T_struct(~[cx.int_type], false); // Refcount } pub fn T_chan(cx: @CrateContext, _t: TypeRef) -> TypeRef { return T_struct(~[cx.int_type], false); // Refcount } pub fn T_taskptr(cx: @CrateContext) -> TypeRef { return T_ptr(cx.task_type); } pub fn 
T_opaque_cbox_ptr(cx: @CrateContext) -> TypeRef { // closures look like boxes (even when they are ~fn or &fn) // see trans_closure.rs return T_opaque_box_ptr(cx); } pub fn T_enum_discrim(cx: @CrateContext) -> TypeRef { return cx.int_type; } pub fn T_captured_tydescs(cx: @CrateContext, n: uint) -> TypeRef { return T_struct(vec::from_elem::<TypeRef>(n, T_ptr(cx.tydesc_type)), false); } pub fn T_opaque_trait(cx: @CrateContext, store: ty::TraitStore) -> TypeRef { match store { ty::BoxTraitStore => { T_struct(~[T_ptr(cx.tydesc_type), T_opaque_box_ptr(cx)], false) } ty::UniqTraitStore => { T_struct(~[T_ptr(cx.tydesc_type), T_unique_ptr(T_unique(cx, T_i8())), T_ptr(cx.tydesc_type)], false) } ty::RegionTraitStore(_) => { T_struct(~[T_ptr(cx.tydesc_type), T_ptr(T_i8())], false) } } } pub fn T_opaque_port_ptr() -> TypeRef { return T_ptr(T_i8()); } pub fn T_opaque_chan_ptr() -> TypeRef { return T_ptr(T_i8()); } // LLVM constant constructors. pub fn C_null(t: TypeRef) -> ValueRef { unsafe { return llvm::LLVMConstNull(t); } } pub fn C_undef(t: TypeRef) -> ValueRef { unsafe { return llvm::LLVMGetUndef(t); } } pub fn C_integral(t: TypeRef, u: u64, sign_extend: Bool) -> ValueRef { unsafe { return llvm::LLVMConstInt(t, u, sign_extend); } } pub fn C_floating(s: &str, t: TypeRef) -> ValueRef { unsafe { return str::as_c_str(s, |buf| llvm::LLVMConstRealOfString(t, buf)); } } pub fn C_nil() -> ValueRef { return C_struct(~[]); } pub fn C_bool(b: bool) -> ValueRef { C_integral(T_bool(), if b { 1u64 } else { 0u64 }, False) } pub fn C_i1(b: bool) -> ValueRef { return C_integral(T_i1(), if b { 1 } else { 0 }, False); } pub fn C_i32(i: i32) -> ValueRef { return C_integral(T_i32(), i as u64, True); } pub fn C_i64(i: i64) -> ValueRef { return C_integral(T_i64(), i as u64, True); } pub fn C_int(cx: @CrateContext, i: int) -> ValueRef { return C_integral(cx.int_type, i as u64, True); } pub fn C_uint(cx: @CrateContext, i: uint) -> ValueRef { return C_integral(cx.int_type, i as u64, False); } pub fn 
C_u8(i: uint) -> ValueRef { return C_integral(T_i8(), i as u64, False); } // This is a 'c-like' raw string, which differs from // our boxed-and-length-annotated strings. pub fn C_cstr(cx: @CrateContext, s: @~str) -> ValueRef { unsafe { match cx.const_cstr_cache.find(&s) { Some(&llval) => return llval, None => () } let sc = do str::as_c_str(*s) |buf| { llvm::LLVMConstString(buf, s.len() as c_uint, False) }; let g = str::as_c_str(fmt!("str%u", (cx.names)("str").repr), |buf| llvm::LLVMAddGlobal(cx.llmod, val_ty(sc), buf)); llvm::LLVMSetInitializer(g, sc); llvm::LLVMSetGlobalConstant(g, True); lib::llvm::SetLinkage(g, lib::llvm::InternalLinkage); cx.const_cstr_cache.insert(s, g); return g; } } // NB: Do not use `do_spill_noroot` to make this into a constant string, or // you will be kicked off fast isel. See issue #4352 for an example of this. pub fn C_estr_slice(cx: @CrateContext, s: @~str) -> ValueRef { unsafe { let len = s.len(); let cs = llvm::LLVMConstPointerCast(C_cstr(cx, s), T_ptr(T_i8())); C_struct(~[cs, C_uint(cx, len + 1u /* +1 for null */)]) } } // Returns a Plain Old LLVM String: pub fn C_postr(s: &str) -> ValueRef { unsafe { return do str::as_c_str(s) |buf| { llvm::LLVMConstString(buf, str::len(s) as c_uint, False) }; } } pub fn C_zero_byte_arr(size: uint) -> ValueRef { unsafe { let mut i = 0u; let mut elts: ~[ValueRef] = ~[]; while i < size { elts.push(C_u8(0u)); i += 1u; } return llvm::LLVMConstArray(T_i8(), vec::raw::to_ptr(elts), elts.len() as c_uint); } } pub fn C_struct(elts: &[ValueRef]) -> ValueRef { unsafe { do vec::as_imm_buf(elts) |ptr, len| { llvm::LLVMConstStruct(ptr, len as c_uint, False) } } } pub fn C_packed_struct(elts: &[ValueRef]) -> ValueRef { unsafe { do vec::as_imm_buf(elts) |ptr, len| { llvm::LLVMConstStruct(ptr, len as c_uint, True) } } } pub fn C_named_struct(T: TypeRef, elts: &[ValueRef]) -> ValueRef { unsafe { do vec::as_imm_buf(elts) |ptr, len| { llvm::LLVMConstNamedStruct(T, ptr, len as c_uint) } } } pub fn C_array(ty: 
TypeRef, elts: &[ValueRef]) -> ValueRef { unsafe { return llvm::LLVMConstArray(ty, vec::raw::to_ptr(elts), elts.len() as c_uint); } } pub fn C_bytes(bytes: &[u8]) -> ValueRef { unsafe { return llvm::LLVMConstString( cast::transmute(vec::raw::to_ptr(bytes)), bytes.len() as c_uint, True); } } pub fn C_bytes_plus_null(bytes: &[u8]) -> ValueRef { unsafe { return llvm::LLVMConstString( cast::transmute(vec::raw::to_ptr(bytes)), bytes.len() as c_uint, False); } } pub fn C_shape(ccx: @CrateContext, bytes: ~[u8]) -> ValueRef { unsafe { let llshape = C_bytes_plus_null(bytes); let name = fmt!("shape%u", (ccx.names)("shape").repr); let llglobal = str::as_c_str(name, |buf| { llvm::LLVMAddGlobal(ccx.llmod, val_ty(llshape), buf) }); llvm::LLVMSetInitializer(llglobal, llshape); llvm::LLVMSetGlobalConstant(llglobal, True); lib::llvm::SetLinkage(llglobal, lib::llvm::InternalLinkage); return llvm::LLVMConstPointerCast(llglobal, T_ptr(T_i8())); } } pub fn get_param(fndecl: ValueRef, param: uint) -> ValueRef { unsafe { llvm::LLVMGetParam(fndecl, param as c_uint) } } pub fn const_get_elt(cx: @CrateContext, v: ValueRef, us: &[c_uint]) -> ValueRef { unsafe { let r = do vec::as_imm_buf(us) |p, len| { llvm::LLVMConstExtractValue(v, p, len as c_uint) }; debug!("const_get_elt(v=%s, us=%?, r=%s)", val_str(cx.tn, v), us, val_str(cx.tn, r)); return r; } } pub fn const_to_int(v: ValueRef) -> c_longlong { unsafe { llvm::LLVMConstIntGetSExtValue(v) } } pub fn const_to_uint(v: ValueRef) -> c_ulonglong { unsafe { llvm::LLVMConstIntGetZExtValue(v) } } pub fn is_undef(val: ValueRef) -> bool { unsafe { llvm::LLVMIsUndef(val) != False } } pub fn is_null(val: ValueRef) -> bool { unsafe { llvm::LLVMIsNull(val) != False } } // Used to identify cached monomorphized functions and vtables #[deriving(Eq)] pub enum mono_param_id { mono_precise(ty::t, Option<~[mono_id]>), mono_any, mono_repr(uint /* size */, uint /* align */, MonoDataClass, datum::DatumMode), } #[deriving(Eq)] pub enum MonoDataClass { MonoBits, 
// Anything not treated differently from arbitrary integer data MonoNonNull, // Non-null pointers (used for optional-pointer optimization) // FIXME(#3547)---scalars and floats are // treated differently in most ABIs. But we // should be doing something more detailed // here. MonoFloat } pub fn mono_data_classify(t: ty::t) -> MonoDataClass { match ty::get(t).sty { ty::ty_float(_) => MonoFloat, ty::ty_rptr(*) | ty::ty_uniq(*) | ty::ty_box(*) | ty::ty_opaque_box(*) | ty::ty_estr(ty::vstore_uniq) | ty::ty_evec(_, ty::vstore_uniq) | ty::ty_estr(ty::vstore_box) | ty::ty_evec(_, ty::vstore_box) | ty::ty_bare_fn(*) => MonoNonNull, // Is that everything? Would closures or slices qualify? _ => MonoBits } } #[deriving(Eq)] pub struct mono_id_ { def: ast::def_id, params: ~[mono_param_id], impl_did_opt: Option<ast::def_id> } pub type mono_id = @mono_id_; #[cfg(stage0)] impl to_bytes::IterBytes for mono_param_id { fn iter_bytes(&self, lsb0: bool, f: to_bytes::Cb) { match *self { mono_precise(t, ref mids) => to_bytes::iter_bytes_3(&0u8, &ty::type_id(t), mids, lsb0, f), mono_any => 1u8.iter_bytes(lsb0, f), mono_repr(ref a, ref b, ref c, ref d) => to_bytes::iter_bytes_5(&2u8, a, b, c, d, lsb0, f) } } } #[cfg(not(stage0))] impl to_bytes::IterBytes for mono_param_id { fn iter_bytes(&self, lsb0: bool, f: to_bytes::Cb) -> bool { match *self { mono_precise(t, ref mids) => to_bytes::iter_bytes_3(&0u8, &ty::type_id(t), mids, lsb0, f), mono_any => 1u8.iter_bytes(lsb0, f), mono_repr(ref a, ref b, ref c, ref d) => to_bytes::iter_bytes_5(&2u8, a, b, c, d, lsb0, f) } } } #[cfg(stage0)] impl to_bytes::IterBytes for MonoDataClass { fn iter_bytes(&self, lsb0: bool, f:to_bytes::Cb) { (*self as u8).iter_bytes(lsb0, f) } } #[cfg(not(stage0))] impl to_bytes::IterBytes for MonoDataClass { fn iter_bytes(&self, lsb0: bool, f:to_bytes::Cb) -> bool { (*self as u8).iter_bytes(lsb0, f) } } #[cfg(stage0)] impl to_bytes::IterBytes for mono_id_ { fn iter_bytes(&self, lsb0: bool, f: to_bytes::Cb) { 
to_bytes::iter_bytes_2(&self.def, &self.params, lsb0, f); } } #[cfg(not(stage0))] impl to_bytes::IterBytes for mono_id_ { fn iter_bytes(&self, lsb0: bool, f: to_bytes::Cb) -> bool { to_bytes::iter_bytes_2(&self.def, &self.params, lsb0, f) } } pub fn umax(cx: block, a: ValueRef, b: ValueRef) -> ValueRef { let cond = build::ICmp(cx, lib::llvm::IntULT, a, b); return build::Select(cx, cond, b, a); } pub fn umin(cx: block, a: ValueRef, b: ValueRef) -> ValueRef { let cond = build::ICmp(cx, lib::llvm::IntULT, a, b); return build::Select(cx, cond, a, b); } pub fn align_to(cx: block, off: ValueRef, align: ValueRef) -> ValueRef { let mask = build::Sub(cx, align, C_int(cx.ccx(), 1)); let bumped = build::Add(cx, off, mask); return build::And(cx, bumped, build::Not(cx, mask)); } pub fn path_str(sess: session::Session, p: &[path_elt]) -> ~str { let mut r = ~"", first = true; for p.each |e| { match *e { ast_map::path_name(s) | ast_map::path_mod(s) => { if first { first = false; } else { r += ~"::"; } r += *sess.str_of(s); } } } r } pub fn monomorphize_type(bcx: block, t: ty::t) -> ty::t { match bcx.fcx.param_substs { Some(substs) => { ty::subst_tps(bcx.tcx(), substs.tys, substs.self_ty, t) } _ => { assert!(!ty::type_has_params(t)); t } } } pub fn node_id_type(bcx: block, id: ast::node_id) -> ty::t { let tcx = bcx.tcx(); let t = ty::node_id_to_type(tcx, id); monomorphize_type(bcx, t) } pub fn expr_ty(bcx: block, ex: @ast::expr) -> ty::t { node_id_type(bcx, ex.id) } pub fn expr_ty_adjusted(bcx: block, ex: @ast::expr) -> ty::t { let tcx = bcx.tcx(); let t = ty::expr_ty_adjusted(tcx, ex); monomorphize_type(bcx, t) } pub fn node_id_type_params(bcx: block, id: ast::node_id) -> ~[ty::t] { let tcx = bcx.tcx(); let params = ty::node_id_to_type_params(tcx, id); if !params.all(|t| !ty::type_needs_infer(*t)) { bcx.sess().bug( fmt!("Type parameters for node %d include inference types: %s", id, str::connect(params.map(|t| bcx.ty_to_str(*t)), ","))); } match bcx.fcx.param_substs { Some(substs) 
// NOTE(review): this chunk begins mid-function — the opening of the
// enclosing item (a match over some param_substs) is outside the visible
// range; its tail is reproduced unchanged below. Pre-1.0 Rust syntax
// (`do` closures, `@` boxes, `copy`, `fmt!`) is period-correct and kept
// verbatim; only comments were added and mangled line breaks restored.
=> {
    do vec::map(params) |t| {
        ty::subst_tps(tcx, substs.tys, substs.self_ty, *t)
    }
}
_ => params
}
}

// Look up the vtables recorded by typeck for this node id and resolve them
// against the current function context's type-parameter substitutions.
// Returns None when typeck recorded no vtables for the node.
pub fn node_vtables(bcx: block, id: ast::node_id)
    -> Option<typeck::vtable_res> {
    let raw_vtables = bcx.ccx().maps.vtable_map.find(&id);
    raw_vtables.map(
        |&vts| resolve_vtables_in_fn_ctxt(bcx.fcx, *vts))
}

// Resolve every vtable origin in `vts` in the given fn_ctxt.
pub fn resolve_vtables_in_fn_ctxt(fcx: fn_ctxt, vts: typeck::vtable_res)
    -> typeck::vtable_res {
    @vec::map(*vts, |d| resolve_vtable_in_fn_ctxt(fcx, copy *d))
}

// Apply the typaram substitutions in the fn_ctxt to a vtable. This should
// eliminate any vtable_params.
pub fn resolve_vtable_in_fn_ctxt(fcx: fn_ctxt, vt: typeck::vtable_origin)
    -> typeck::vtable_origin {
    let tcx = fcx.ccx.tcx;
    match vt {
        // Statically known impl: substitute the current type parameters
        // into its type arguments and recursively resolve the nested
        // vtables it requires.
        typeck::vtable_static(trait_id, tys, sub) => {
            let tys = match fcx.param_substs {
                Some(substs) => {
                    do vec::map(tys) |t| {
                        ty::subst_tps(tcx, substs.tys, substs.self_ty, *t)
                    }
                }
                _ => tys
            };
            typeck::vtable_static(trait_id, tys,
                                  resolve_vtables_in_fn_ctxt(fcx, sub))
        }
        // Caller-supplied vtable: fetch it from the current substitutions.
        // Reaching here with no substitutions is a compiler bug.
        typeck::vtable_param(n_param, n_bound) => {
            match fcx.param_substs {
                Some(substs) => {
                    find_vtable(tcx, substs, n_param, n_bound)
                }
                _ => {
                    tcx.sess.bug(fmt!(
                        "resolve_vtable_in_fn_ctxt: asked to lookup but \
                        no vtables in the fn_ctxt!"))
                }
            }
        }
    }
}

// Fetch the vtable for bound `n_bound` of type parameter `n_param` from the
// flat vtable array carried in `ps`.
pub fn find_vtable(tcx: ty::ctxt, ps: &param_substs,
                   n_param: uint, n_bound: uint)
    -> typeck::vtable_origin {
    debug!("find_vtable(n_param=%u, n_bound=%u, ps=%s)",
           n_param, n_bound, ps.repr(tcx));

    // Vtables are stored in a flat array, finding the right one is
    // somewhat awkward: skip one slot per trait bound (including
    // supertraits) of every preceding type parameter, then index by the
    // bound number within this parameter.
    let first_n_type_param_defs = ps.type_param_defs.slice(0, n_param);
    let vtables_to_skip =
        ty::count_traits_and_supertraits(tcx, first_n_type_param_defs);
    let vtable_off = vtables_to_skip + n_bound;
    /*bad*/ copy ps.vtables.get()[vtable_off]
}

// Build a substs with a placeholder self region and no self type, for
// callers that need a substs value but have no real one.
pub fn dummy_substs(tps: ~[ty::t]) -> ty::substs {
    substs {
        self_r: Some(ty::re_bound(ty::br_self)),
        self_ty: None,
        tps: tps
    }
}

// Translate a source span into (filename, line) LLVM constants: a C string
// pointer for the file name and an int for the (1-based) line number.
pub fn filename_and_line_num_from_span(bcx: block, span: span)
    -> (ValueRef, ValueRef) {
    let loc = bcx.sess().parse_sess.cm.lookup_char_pos(span.lo);
    let filename_cstr = C_cstr(bcx.ccx(), @/*bad*/copy loc.file.name);
    let filename = build::PointerCast(bcx, filename_cstr, T_ptr(T_i8()));
    let line = C_int(bcx.ccx(), loc.line as int);
    (filename, line)
}

// Casts a Rust bool value to an i1.
pub fn bool_to_i1(bcx: block, llval: ValueRef) -> ValueRef {
    build::ICmp(bcx, lib::llvm::IntNE, llval, C_bool(false))
}<|fim▁end|>
// NOTE(review): line below is this sample's FIM completion target — a lone
// statement from elsewhere in the original file (presumably inside a
// cleanup-related function; cannot be verified from this chunk).
let cleanup_type = cleanup_type(cx.tcx(), ty);
<|file_name|>relational.py<|end_file_name|><|fim▁begin|>""" Copyright (c) 2017-2022, Vanessa Sochat All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
""" from sqlalchemy import create_engine from sqlalchemy.orm import scoped_session, sessionmaker from sqlalchemy.ext.declarative import declarative_base from expfactory.logger import bot from expfactory.utils import write_json from expfactory.defaults import EXPFACTORY_SUBID, EXPFACTORY_DATA from glob import glob import os import uuid import pickle import json import sys # RELATIONAL ################################################################### # # This is an Expfactory Flask Server database plugin. It implements common # functions (generate_subid, save_data, init_db) that should prepare a # database and perform actions to save data to it. The functions are added # to the main application upon initialization of the server. This relational # module has support for sqlite3, mysql, and postgres # ################################################################################ def generate_subid(self, token=None, return_user=False): """generate a new user in the database, still session based so we create a new identifier. """ from expfactory.database.models import Participant if not token: p = Participant() else: p = Participant(token=token) self.session.add(p) self.session.commit() if return_user is True: return p return p.id def print_user(self, user): """print a relational database user""" status = "active" token = user.token if token in ["finished", "revoked"]: status = token if token is None: token = "" subid = "%s\t%s[%s]" % (user.id, token, status) print(subid) return subid def list_users(self, user=None): """list users, each having a model in the database. A headless experiment will use protected tokens, and interactive will be based on auto- incremented ids. 
""" from expfactory.database.models import Participant participants = Participant.query.all() users = [] for user in participants: users.append(self.print_user(user)) return users # Actions ###################################################################### def generate_user(self): """generate a new user in the database, still session based so we create a new identifier. This function is called from the users new entrypoint, and it assumes we want a user generated with a token. """ token = str(uuid.uuid4()) return self.generate_subid(token=token, return_user=True) def finish_user(self, subid): """finish user will remove a user's token, making the user entry not accesible if running in headless model""" p = self.revoke_token(subid) p.token = "finished" self.session.commit() return p def restart_user(self, subid): """restart a user, which means revoking and issuing a new token.""" p = self.revoke_token(subid) p = self.refresh_token(subid) return p # Tokens ####################################################################### def validate_token(self, token): """retrieve a subject based on a token. Valid means we return a participant invalid means we return None """ from expfactory.database.models import Participant p = Participant.query.filter(Participant.token == token).first() if p is not None: if p.token.endswith(("finished", "revoked")): p = None else: p = p.id return p def revoke_token(self, subid): """revoke a token by removing it. 
Is done at finish, and also available as a command line option""" from expfactory.database.models import Participant <|fim▁hole|> self.session.commit() return p def refresh_token(self, subid): """refresh or generate a new token for a user""" from expfactory.database.models import Participant p = Participant.query.filter(Participant.id == subid).first() if p is not None: p.token = str(uuid.uuid4()) self.session.commit() return p def save_data(self, session, exp_id, content): """save data will obtain the current subid from the session, and save it depending on the database type. Currently we just support flat files""" from expfactory.database.models import Participant, Result subid = session.get("subid") token = session.get("token") self.logger.info("Saving data for subid %s" % subid) # We only attempt save if there is a subject id, set at start if subid is not None: p = Participant.query.filter( Participant.id == subid ).first() # better query here # Does if self.headless and p.token != token: self.logger.warning( "%s attempting to use mismatched token [%s] skipping save" % (p.id, token) ) elif self.headless and p.token.endswith(("finished", "revoked")): self.logger.warning( "%s attempting to use expired token [%s] skipping save" % (p.id, token) ) else: # Preference is to save data under 'data', otherwise do all of it if "data" in content: content = content["data"] result = Result( data=content, exp_id=exp_id, participant_id=p.id ) # check if changes from str/int # Create and save the result self.session.add(result) p.results.append(result) self.session.commit() self.logger.info("Save [participant] %s [result] %s" % (p, result)) Base = declarative_base() def init_db(self): """initialize the database, with the default database path or custom with a format corresponding to the database type: Examples: sqlite:////scif/data/expfactory.db """ # The user can provide a custom string if self.database is None: self.logger.error("You must provide a database url, exiting.") 
sys.exit(1) self.engine = create_engine(self.database, convert_unicode=True) self.session = scoped_session( sessionmaker(autocommit=False, autoflush=False, bind=self.engine) ) # Database Setup Base.query = self.session.query_property() # import all modules here that might define models so that # they will be registered properly on the metadata. Otherwise # you will have to import them first before calling init_db() import expfactory.database.models self.Base = Base self.Base.metadata.create_all(bind=self.engine)<|fim▁end|>
p = Participant.query.filter(Participant.id == subid).first() if p is not None: p.token = "revoked"